-rw-r--r--  NEWS.md  |   8
-rw-r--r--  gc.c     | 147
-rw-r--r--  gc.rb    |   9
3 files changed, 15 insertions(+), 149 deletions(-)
diff --git a/NEWS.md b/NEWS.md
index ea69498778..6856a29517 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -108,14 +108,6 @@ Outstanding ones only.
* Symbol#to_proc now returns a lambda Proc.
[[Feature #16260]]
-* GC
-
- * Modified method
-
- * GC.start now takes an option `compact: true` to compact the heap.
- For example `GC.start(compact: true)` will have the same effect as
- `GC.compact`.
-
## Stdlib updates
Outstanding ones only.
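For context, the NEWS entry removed above documented GC.start(compact: true) as equivalent to GC.compact. With this revert, heap compaction is requested through GC.compact alone; a minimal usage sketch, illustrative only:

    # Before this revert, both forms compacted the heap:
    #   GC.start(compact: true)
    #   GC.compact
    # After it, GC.compact remains the entry point:
    GC.compact  # runs a full GC and compacts the heap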
diff --git a/gc.c b/gc.c
index 547f9b2035..3b2e7cdfa0 100644
--- a/gc.c
+++ b/gc.c
@@ -480,7 +480,6 @@ typedef enum {
GPR_FLAG_HAVE_FINALIZE = 0x4000,
GPR_FLAG_IMMEDIATE_MARK = 0x8000,
GPR_FLAG_FULL_MARK = 0x10000,
- GPR_FLAG_COMPACT = 0x20000,
GPR_DEFAULT_REASON =
(GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
@@ -635,8 +634,6 @@ typedef struct rb_heap_struct {
struct heap_page *using_page;
struct list_head pages;
struct heap_page *sweeping_page; /* iterator for .pages */
- struct heap_page *compact_cursor;
- VALUE moved_list;
#if GC_ENABLE_INCREMENTAL_MARK
struct heap_page *pooled_pages;
#endif
@@ -670,7 +667,6 @@ typedef struct rb_objspace {
unsigned int gc_stressful: 1;
unsigned int has_hook: 1;
unsigned int during_minor_gc : 1;
- unsigned int compact : 1;
#if GC_ENABLE_INCREMENTAL_MARK
unsigned int during_incremental_marking : 1;
#endif
@@ -4182,71 +4178,17 @@ gc_setup_mark_bits(struct heap_page *page)
memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
}
-static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
-static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list);
-
-static short
-try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE vp)
-{
- struct heap_page * cursor = heap->compact_cursor;
-
- while(1) {
- bits_t *mark_bits = cursor->mark_bits;
- RVALUE * p = cursor->start;
- RVALUE * offset = p - NUM_IN_PAGE(p);
-
- /* Find an object to move and move it. Movable objects must be
- * marked, so we iterate using the marking bitmap */
- for (size_t i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
- bits_t bits = mark_bits[i];
-
- if (bits) {
- p = offset + i * BITS_BITLENGTH;
-
- do {
- if (bits & 1) {
- if (gc_is_moveable_obj(objspace, (VALUE)p)) {
- heap->moved_list = gc_move(objspace, (VALUE)p, vp, heap->moved_list);
- return 1;
- }
- }
- p++;
- bits >>= 1;
- } while (bits);
- }
- }
-
- struct heap_page * next;
-
- next = list_prev(&heap->pages, cursor, page_node);
-
- // Cursors have met, let's quit
- if (next == sweep_page) {
- break;
- } else {
- heap->compact_cursor = next;
- cursor = next;
- }
- }
-
- return 0;
-}
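The removed try_move scans the mark bitmap of the page under the compaction cursor word by word, testing the low bit and shifting until it finds a marked, movable object. A standalone sketch of that bit-scanning pattern, using Ruby Integers as bitmap words (all names here are hypothetical, not part of the patch):

    BITS_BITLENGTH = 64  # bits per bitmap word, as in gc.c

    # Yield the slot index of every set bit, mirroring the removed loop:
    def each_marked_index(mark_bits)
      mark_bits.each_with_index do |bits, i|
        next if bits.zero?
        index = i * BITS_BITLENGTH
        while bits != 0
          yield index if bits & 1 == 1  # low bit set => slot is marked
          index += 1
          bits >>= 1                    # advance to the next slot's bit
        end
      end
    end

    each_marked_index([0b1010, 0]) { |idx| p idx }  # prints 1, then 3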
-
static inline int
gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
{
int i;
- int empty_slots = 0, freed_slots = 0, final_slots = 0, moved_slots = 0;
+ int empty_slots = 0, freed_slots = 0, final_slots = 0;
RVALUE *p, *pend,*offset;
bits_t *bits, bitset;
gc_report(2, objspace, "page_sweep: start.\n");
sweep_page->flags.before_sweep = FALSE;
- if (heap->compact_cursor && sweep_page == heap->compact_cursor) {
- /* The compaction cursor and sweep page met, so we need to quit compacting */
- heap->compact_cursor = NULL;
- }
p = sweep_page->start; pend = p + sweep_page->total_slots;
offset = p - NUM_IN_PAGE(p);
@@ -4278,51 +4220,20 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
}
else {
(void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
-
- if (heap->compact_cursor) {
- /* If try_move couldn't compact anything, it means
- * the cursors have met and there are no objects left that
- * /can/ be compacted, so we need to quit. */
- if (try_move(objspace, heap, sweep_page, vp)) {
- moved_slots++;
- } else {
- heap->compact_cursor = NULL;
- heap_page_add_freeobj(objspace, sweep_page, vp);
- }
- } else {
- heap_page_add_freeobj(objspace, sweep_page, vp);
- }
-
+ heap_page_add_freeobj(objspace, sweep_page, vp);
gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
- freed_slots++;
+ freed_slots++;
+ asan_poison_object(vp);
}
break;
}
/* minor cases */
- case T_MOVED:
- if (!objspace->flags.during_compacting) {
- /* When compaction is combined with sweeping, some of the swept pages
- * will have T_MOVED objects on them. These need to be kept alive
- * until references are finished updating. Once references are finished
- * updating, `gc_unlink_moved_list` will clear the T_MOVED slots
- * and release them back to the heap. If there are T_MOVED slots
- * in the heap and we're _not_ compacting, then it's a bug. */
- rb_bug("T_MOVED shouldn't be on the heap unless compacting\n");
- }
- break;
case T_ZOMBIE:
/* already counted */
break;
case T_NONE:
- if (heap->compact_cursor) {
- if (try_move(objspace, heap, sweep_page, vp)) {
- moved_slots++;
- } else {
- heap->compact_cursor = NULL;
- }
- }
- empty_slots++; /* already freed */
+ empty_slots++; /* already freed */
break;
}
}
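The comments deleted above describe the scheme this revert removes: sweeping walks pages from the head of the list while the compaction cursor walks back from the tail, moving marked objects into freed slots and leaving T_MOVED forwarding slots behind, until the two cursors meet. A self-contained sketch of that two-finger idea over a flat array (hypothetical code; nil stands for a free slot):

    def compact!(slots)
      free = 0                           # sweep side, moves forward
      scan = slots.length - 1            # compact cursor, moves backward
      while free < scan
        if slots[free].nil?              # found a free slot to fill
          scan -= 1 while scan > free && slots[scan].nil?
          break if scan <= free          # cursors met: quit compacting
          slots[free] = slots[scan]      # move the live object forward
          slots[scan] = :moved           # leave a T_MOVED-style marker
          scan -= 1
        end
        free += 1
      end
      slots
    end

    p compact!([:a, nil, :b, nil, :c])   # => [:a, :c, :b, nil, :moved]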
@@ -4346,7 +4257,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
(int)sweep_page->total_slots,
freed_slots, empty_slots, final_slots);
- sweep_page->free_slots = (freed_slots + empty_slots) - moved_slots;
+ sweep_page->free_slots = freed_slots + empty_slots;
objspace->profile.total_freed_objects += freed_slots;
heap_pages_final_slots += final_slots;
sweep_page->final_slots += final_slots;
@@ -4360,7 +4271,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
gc_report(2, objspace, "page_sweep: end.\n");
- return (freed_slots + empty_slots) - moved_slots;
+ return freed_slots + empty_slots;
}
/* allocate additional minimum page to work */
@@ -4546,29 +4457,6 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
}
static void
-gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
-{
- heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
- objspace->profile.compact_count++;
- heap->moved_list = Qfalse;
-}
-
-static void gc_update_references(rb_objspace_t * objspace);
-static void gc_unlink_moved_list(rb_objspace_t *objspace, VALUE moved_list_head);
-
-static void
-gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
-{
- heap->compact_cursor = NULL;
- gc_update_references(objspace);
- rb_clear_constant_cache();
- gc_unlink_moved_list(objspace, heap->moved_list);
- heap->moved_list = Qfalse;
- objspace->flags.compact = 0;
- objspace->flags.during_compacting = FALSE;
-}
-
-static void
gc_sweep(rb_objspace_t *objspace)
{
const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
@@ -4580,13 +4468,7 @@ gc_sweep(rb_objspace_t *objspace)
gc_prof_sweep_timer_start(objspace);
#endif
gc_sweep_start(objspace);
- if (objspace->flags.compact) {
- gc_compact_start(objspace, heap_eden);
- }
gc_sweep_rest(objspace);
- if (objspace->flags.compact) {
- gc_compact_finish(objspace, heap_eden);
- }
#if !GC_ENABLE_LAZY_SWEEP
gc_prof_sweep_timer_stop(objspace);
#endif
@@ -7401,8 +7283,6 @@ gc_start(rb_objspace_t *objspace, int reason)
/* reason may be clobbered, later, so keep set immediate_sweep here */
objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
- objspace->flags.compact = !!((unsigned)reason & GPR_FLAG_COMPACT);
- objspace->flags.during_compacting = TRUE;
if (!heap_allocated_pages) return FALSE; /* heap is not ready */
if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -7664,7 +7544,7 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
}
static VALUE
-gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
+gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
{
rb_objspace_t *objspace = &rb_objspace;
int reason = GPR_FLAG_FULL_MARK |
@@ -7672,14 +7552,9 @@ gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE
GPR_FLAG_IMMEDIATE_SWEEP |
GPR_FLAG_METHOD;
- /* For now, compact implies full mark / sweep, so ignore other flags */
- if (RTEST(compact)) {
- reason |= GPR_FLAG_COMPACT;
- } else {
- if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
- if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
- if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
- }
+ if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
+ if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
+ if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
garbage_collect(objspace, reason);
gc_finalize_deferred(objspace);
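After this change, gc_start_internal builds the default reason with all three flags set and clears individual bits for falsey keyword arguments. The same bit-masking rendered in Ruby; the FULL_MARK and IMMEDIATE_MARK values match the enum earlier in this diff, while the IMMEDIATE_SWEEP value is assumed for illustration and GPR_FLAG_METHOD is omitted:

    GPR_FLAG_IMMEDIATE_SWEEP = 0x0004   # assumed value, for illustration
    GPR_FLAG_IMMEDIATE_MARK  = 0x8000   # from the enum in gc.c
    GPR_FLAG_FULL_MARK       = 0x10000  # from the enum in gc.c

    def build_reason(full_mark:, immediate_mark:, immediate_sweep:)
      reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
               GPR_FLAG_IMMEDIATE_SWEEP
      reason &= ~GPR_FLAG_FULL_MARK       unless full_mark
      reason &= ~GPR_FLAG_IMMEDIATE_MARK  unless immediate_mark
      reason &= ~GPR_FLAG_IMMEDIATE_SWEEP unless immediate_sweep
      reason
    end

    puts build_reason(full_mark: false, immediate_mark: true,
                      immediate_sweep: true).to_s(16)  # => "8004"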
diff --git a/gc.rb b/gc.rb
index 881b8659ce..dd5781b603 100644
--- a/gc.rb
+++ b/gc.rb
@@ -26,17 +26,16 @@ module GC
#
# Use full_mark: false to perform a minor GC.
# Use immediate_sweep: false to defer sweeping (use lazy sweep).
- # Use compact: true to compact the heap (it implies a full mark and sweep).
#
# Note: These keyword arguments are implementation and version dependent. They
# are not guaranteed to be future-compatible, and may be ignored if the
# underlying implementation does not support them.
- def self.start full_mark: true, immediate_mark: true, immediate_sweep: true, compact: false
- __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep, compact
+ def self.start full_mark: true, immediate_mark: true, immediate_sweep: true
+ __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep
end
def garbage_collect full_mark: true, immediate_mark: true, immediate_sweep: true
- __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep, false
+ __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep
end
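The keyword arguments described in the doc comment above tune the collection like so (usage sketch; behavior remains implementation-dependent, as the comment notes):

    GC.start                          # full mark, immediate mark and sweep
    GC.start(full_mark: false)        # perform a minor GC
    GC.start(immediate_sweep: false)  # defer sweeping (lazy sweep)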
# call-seq:
@@ -189,7 +188,7 @@ end
module ObjectSpace
def garbage_collect full_mark: true, immediate_mark: true, immediate_sweep: true
- __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep, false
+ __builtin_gc_start_internal full_mark, immediate_mark, immediate_sweep
end
module_function :garbage_collect