about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: KJ Tsanaktsidis <kj@kjtsanaktsidis.id.au> 2023-11-27 16:49:18 +1100
committer: Peter Zhu <peter@peterzhu.ca> 2023-12-07 10:19:35 -0500
commit: 5d832d16d9878766dfe527934ef1ad64698b9bf8 (patch)
tree: fa78919c9fcb3f2e3d98704b0f25c09918fbf50c
parent: 10bc0bd4ab9b886b77ba0cb4cdb2fa121a84c835 (diff)
download: ruby-5d832d16d9878766dfe527934ef1ad64698b9bf8.tar.gz
Add objspace_each_pages to gc.c
This works like objspace_each_obj, except instead of being called with the start & end address of each page, it's called with the page structure itself. [Bug #20022]
-rw-r--r--  gc.c | 50
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/gc.c b/gc.c
index 8ab7a73862..f97f1cbbae 100644
--- a/gc.c
+++ b/gc.c
@@ -3885,6 +3885,7 @@ Init_gc_stress(void)
}
typedef int each_obj_callback(void *, void *, size_t, void *);
+typedef int each_page_callback(struct heap_page *, void *);
static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
@@ -3893,7 +3894,8 @@ struct each_obj_data {
rb_objspace_t *objspace;
bool reenable_incremental;
- each_obj_callback *callback;
+ each_obj_callback *each_obj_callback;
+ each_page_callback *each_page_callback;
void *data;
struct heap_page **pages[SIZE_POOL_COUNT];
@@ -3967,9 +3969,15 @@ objspace_each_objects_try(VALUE arg)
uintptr_t pstart = (uintptr_t)page->start;
uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
- if (!__asan_region_is_poisoned((void *)pstart, pend - pstart) &&
- (*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
- break;
+ if (!__asan_region_is_poisoned((void *)pstart, pend - pstart)) {
+ if (data->each_obj_callback &&
+ (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
+ break;
+ }
+ if (data->each_page_callback &&
+ (*data->each_page_callback)(page, data->data)) {
+ break;
+ }
}
page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
@@ -4024,9 +4032,10 @@ rb_objspace_each_objects(each_obj_callback *callback, void *data)
}
static void
-objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
+objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
{
/* Disable incremental GC */
+ rb_objspace_t *objspace = each_obj_data->objspace;
bool reenable_incremental = FALSE;
if (protected) {
reenable_incremental = !objspace->flags.dont_incremental;
@@ -4035,18 +4044,35 @@ objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void
objspace->flags.dont_incremental = TRUE;
}
+ each_obj_data->reenable_incremental = reenable_incremental;
+ memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
+ memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
+ rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
+ objspace_each_objects_ensure, (VALUE)each_obj_data);
+}
+
+static void
+objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
+{
struct each_obj_data each_obj_data = {
.objspace = objspace,
- .reenable_incremental = reenable_incremental,
-
- .callback = callback,
+ .each_obj_callback = callback,
+ .each_page_callback = NULL,
.data = data,
+ };
+ objspace_each_exec(protected, &each_obj_data);
+}
- .pages = {NULL},
- .pages_counts = {0},
+static void
+objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
+{
+ struct each_obj_data each_obj_data = {
+ .objspace = objspace,
+ .each_obj_callback = NULL,
+ .each_page_callback = callback,
+ .data = data,
};
- rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
- objspace_each_objects_ensure, (VALUE)&each_obj_data);
+ objspace_each_exec(protected, &each_obj_data);
}
void