From dd124a0f841f2e25daa4f4e3c9854618176a7743 Mon Sep 17 00:00:00 2001 From: nari Date: Sun, 5 Aug 2012 10:39:37 +0000 Subject: * gc.c: just move functions and so on. I don't touch any internal implementation. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@36626 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- gc.c | 5090 +++++++++++++++++++++++++++++++++--------------------------------- 1 file changed, 2558 insertions(+), 2532 deletions(-) (limited to 'gc.c') diff --git a/gc.c b/gc.c index 71d757276d..84fae87297 100644 --- a/gc.c +++ b/gc.c @@ -114,8 +114,6 @@ static ruby_gc_params_t initial_params = { #define MARK_STACK_MAX 1024 -int ruby_gc_debug_indent = 0; - #ifndef GC_PROFILE_MORE_DETAIL #define GC_PROFILE_MORE_DETAIL 0 #endif @@ -302,12 +300,68 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress; #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG) +#define RANY(o) ((RVALUE*)(o)) +#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist) + #define HEAP_HEADER(p) ((struct heaps_header *)(p)) +#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK))) +#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base) +#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits) +#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE)) +#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8)) +#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1)) +#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p))) + +#ifndef HEAP_ALIGN_LOG +/* default tiny heap size: 16KB */ +#define HEAP_ALIGN_LOG 14 +#endif + +#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG) +#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG)) +#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5) +#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC) +#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod)) + +#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)) +#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8) + +int ruby_gc_debug_indent = 0; +VALUE rb_mGC; +extern st_table *rb_class_tbl; +int ruby_disable_gc_stress = 0; static void rb_objspace_call_finalizer(rb_objspace_t *objspace); static VALUE define_final0(VALUE obj, VALUE block); VALUE rb_define_final(VALUE obj, VALUE block); VALUE rb_undefine_final(VALUE obj); +static void run_final(rb_objspace_t *objspace, VALUE obj); +static void initial_expand_heap(rb_objspace_t *objspace); + +static void negative_size_allocation_error(const char *); +static void *aligned_malloc(size_t, size_t); +static void aligned_free(void *); + +static VALUE lazy_sweep_enable(void); +static int garbage_collect(rb_objspace_t *); +static int gc_lazy_sweep(rb_objspace_t *); +static void mark_tbl(rb_objspace_t *, st_table *, int); + +static double getrusage_time(void); +static inline void gc_prof_timer_start(rb_objspace_t *); +static inline void gc_prof_timer_stop(rb_objspace_t *, int); +static inline void gc_prof_mark_timer_start(rb_objspace_t *); +static inline void gc_prof_mark_timer_stop(rb_objspace_t *); +static inline void gc_prof_sweep_timer_start(rb_objspace_t *); +static inline void gc_prof_sweep_timer_stop(rb_objspace_t *); +static inline void gc_prof_set_malloc_info(rb_objspace_t *); +static inline void gc_prof_inc_live_num(rb_objspace_t *); +static inline void gc_prof_dec_live_num(rb_objspace_t *); + + +/* + --------------------------- ObjectSpace 
----------------------------- +*/ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE rb_objspace_t * @@ -322,49 +376,6 @@ rb_objspace_alloc(void) } #endif -static void initial_expand_heap(rb_objspace_t *objspace); - -void -rb_gc_set_params(void) -{ - char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr; - - if (rb_safe_level() > 0) return; - - malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT"); - if (malloc_limit_ptr != NULL) { - int malloc_limit_i = atoi(malloc_limit_ptr); - if (RTEST(ruby_verbose)) - fprintf(stderr, "malloc_limit=%d (%d)\n", - malloc_limit_i, initial_malloc_limit); - if (malloc_limit_i > 0) { - initial_malloc_limit = malloc_limit_i; - } - } - - heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); - if (heap_min_slots_ptr != NULL) { - int heap_min_slots_i = atoi(heap_min_slots_ptr); - if (RTEST(ruby_verbose)) - fprintf(stderr, "heap_min_slots=%d (%d)\n", - heap_min_slots_i, initial_heap_min_slots); - if (heap_min_slots_i > 0) { - initial_heap_min_slots = heap_min_slots_i; - initial_expand_heap(&rb_objspace); - } - } - - free_min_ptr = getenv("RUBY_FREE_MIN"); - if (free_min_ptr != NULL) { - int free_min_i = atoi(free_min_ptr); - if (RTEST(ruby_verbose)) - fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min); - if (free_min_i > 0) { - initial_free_min = free_min_i; - } - } -} - #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE static void gc_sweep(rb_objspace_t *); static void slot_sweep(rb_objspace_t *, struct heaps_slot *); @@ -408,2112 +419,2329 @@ rb_objspace_free(rb_objspace_t *objspace) } #endif -#ifndef HEAP_ALIGN_LOG -/* default tiny heap size: 16KB */ -#define HEAP_ALIGN_LOG 14 -#endif -#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG) -#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG)) -#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5) -#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC) -#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod)) - -#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)) -#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8) - -#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK))) -#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base) -#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits) -#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE)) -#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8)) -#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1)) -#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p))) -#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p))) -#define CLEAR_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] &= ~((uintptr_t)1 << BITMAP_OFFSET(p))) - -extern st_table *rb_class_tbl; - -int ruby_disable_gc_stress = 0; - -static void run_final(rb_objspace_t *objspace, VALUE obj); -static int garbage_collect(rb_objspace_t *objspace); -static int gc_lazy_sweep(rb_objspace_t *objspace); - -static double getrusage_time(void); -static inline void gc_prof_timer_start(rb_objspace_t *); -static inline void gc_prof_timer_stop(rb_objspace_t *, int); -static inline void gc_prof_mark_timer_start(rb_objspace_t *); -static inline void gc_prof_mark_timer_stop(rb_objspace_t *); -static inline void gc_prof_sweep_timer_start(rb_objspace_t *); -static inline void gc_prof_sweep_timer_stop(rb_objspace_t *); -static inline void gc_prof_set_malloc_info(rb_objspace_t 
*); -static inline void gc_prof_inc_live_num(rb_objspace_t *); -static inline void gc_prof_dec_live_num(rb_objspace_t *); - void rb_global_variable(VALUE *var) { rb_gc_register_address(var); } -static void * -ruby_memerror_body(void *dummy) -{ - rb_memerror(); - return 0; -} - static void -ruby_memerror(void) +allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length) { - if (ruby_thread_has_gvl_p()) { - rb_memerror(); + struct sorted_heaps_slot *p; + struct heaps_free_bitmap *bits; + size_t size, add, i; + + size = next_heaps_length*sizeof(struct sorted_heaps_slot); + add = next_heaps_length - heaps_used; + + if (heaps_used > 0) { + p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size); + if (p) objspace->heap.sorted = p; } else { - if (ruby_native_thread_p()) { - rb_thread_call_with_gvl(ruby_memerror_body, 0); - } - else { - /* no ruby thread */ - fprintf(stderr, "[FATAL] failed to allocate memory\n"); - exit(EXIT_FAILURE); - } + p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size); } -} -void -rb_memerror(void) -{ - rb_thread_t *th = GET_THREAD(); - if (!nomem_error || - (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { - fprintf(stderr, "[FATAL] failed to allocate memory\n"); - exit(EXIT_FAILURE); + if (p == 0) { + during_gc = 0; + rb_memerror(); } - if (rb_thread_raised_p(th, RAISED_NOMEMORY)) { - rb_thread_raised_clear(th); - GET_THREAD()->errinfo = nomem_error; - JUMP_TAG(TAG_RAISE); + + for (i = 0; i < add; i++) { + bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); + if (bits == 0) { + during_gc = 0; + rb_memerror(); + return; + } + bits->next = objspace->heap.free_bitmap; + objspace->heap.free_bitmap = bits; } - rb_thread_raised_set(th, RAISED_NOMEMORY); - rb_exc_raise(nomem_error); } -/* - * call-seq: - * GC.stress -> true or false - * - * returns current status of GC stress mode. - */ - -static VALUE -gc_stress_get(VALUE self) +static void +link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) { - rb_objspace_t *objspace = &rb_objspace; - return ruby_gc_stress ? Qtrue : Qfalse; + slot->free_next = objspace->heap.free_slots; + objspace->heap.free_slots = slot; } -/* - * call-seq: - * GC.stress = bool -> bool - * - * Updates the GC stress mode. - * - * When stress mode is enabled the GC is invoked at every GC opportunity: - * all memory and object allocations. - * - * Enabling stress mode makes Ruby very slow, it is only for debugging. - */ - -static VALUE -gc_stress_set(VALUE self, VALUE flag) +static void +unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) { - rb_objspace_t *objspace = &rb_objspace; - rb_secure(2); - ruby_gc_stress = RTEST(flag); - return flag; + objspace->heap.free_slots = slot->free_next; + slot->free_next = NULL; } -/* - * call-seq: - * GC::Profiler.enable? -> true or false - * - * The current status of GC profile mode. - */ - -static VALUE -gc_profile_enable_get(VALUE self) +static void +assign_heap_slot(rb_objspace_t *objspace) { - rb_objspace_t *objspace = &rb_objspace; - return objspace->profile.run ? Qtrue : Qfalse; -} - -/* - * call-seq: - * GC::Profiler.enable -> nil - * - * Starts the GC profiler. - * - */ + RVALUE *p, *pend, *membase; + struct heaps_slot *slot; + size_t hi, lo, mid; + size_t objs; -static VALUE -gc_profile_enable(void) -{ - rb_objspace_t *objspace = &rb_objspace; - - objspace->profile.run = TRUE; - return Qnil; -} - -/* - * call-seq: - * GC::Profiler.disable -> nil - * - * Stops the GC profiler. 
- * - */ - -static VALUE -gc_profile_disable(void) -{ - rb_objspace_t *objspace = &rb_objspace; - - objspace->profile.run = FALSE; - return Qnil; -} - -static void * -negative_size_allocation_error_with_gvl(void *ptr) -{ - rb_raise(rb_eNoMemError, "%s", (const char *)ptr); - return 0; /* should not be reached */ -} - -static void -negative_size_allocation_error(const char *msg) -{ - if (ruby_thread_has_gvl_p()) { - rb_raise(rb_eNoMemError, "%s", msg); + objs = HEAP_OBJ_LIMIT; + p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE); + if (p == 0) { + during_gc = 0; + rb_memerror(); } - else { - if (ruby_native_thread_p()) { - rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg); - } - else { - fprintf(stderr, "[FATAL] %s\n", msg); - exit(EXIT_FAILURE); - } + slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot)); + if (slot == 0) { + aligned_free(p); + during_gc = 0; + rb_memerror(); } -} + MEMZERO((void*)slot, struct heaps_slot, 1); -static void * -gc_with_gvl(void *ptr) -{ - return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr); -} + slot->next = heaps; + if (heaps) heaps->prev = slot; + heaps = slot; -static int -garbage_collect_with_gvl(rb_objspace_t *objspace) -{ - if (dont_gc) return TRUE; - if (ruby_thread_has_gvl_p()) { - return garbage_collect(objspace); + membase = p; + p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header)); + if ((VALUE)p % sizeof(RVALUE) != 0) { + p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE))); + objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE); } - else { - if (ruby_native_thread_p()) { - return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace); + + lo = 0; + hi = heaps_used; + while (lo < hi) { + register RVALUE *mid_membase; + mid = (lo + hi) / 2; + mid_membase = objspace->heap.sorted[mid].slot->membase; + if (mid_membase < membase) { + lo = mid + 1; + } + else if (mid_membase > membase) { + hi = mid; } else { - /* no ruby thread */ - fprintf(stderr, "[FATAL] failed to allocate memory\n"); - exit(EXIT_FAILURE); + rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid); } } -} + if (hi < heaps_used) { + MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi); + } + objspace->heap.sorted[hi].slot = slot; + objspace->heap.sorted[hi].start = p; + objspace->heap.sorted[hi].end = (p + objs); + heaps->membase = membase; + heaps->slot = p; + heaps->limit = objs; + assert(objspace->heap.free_bitmap != NULL); + heaps->bits = (uintptr_t *)objspace->heap.free_bitmap; + objspace->heap.free_bitmap = objspace->heap.free_bitmap->next; + HEAP_HEADER(membase)->base = heaps; + HEAP_HEADER(membase)->bits = heaps->bits; + memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); + objspace->heap.free_num += objs; + pend = p + objs; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; -static void vm_xfree(rb_objspace_t *objspace, void *ptr); + while (p < pend) { + p->as.free.flags = 0; + p->as.free.next = heaps->freelist; + heaps->freelist = p; + p++; + } + link_free_heap_slot(objspace, heaps); +} -static inline size_t -vm_malloc_prepare(rb_objspace_t *objspace, size_t size) +static void +add_heap_slots(rb_objspace_t *objspace, size_t add) { - if ((ssize_t)size < 0) { - negative_size_allocation_error("negative allocation size (or too big)"); - } - if (size == 0) size = 1; + size_t i; + size_t next_heaps_length; -#if CALC_EXACT_MALLOC_SIZE - size += 
sizeof(size_t); -#endif + next_heaps_length = heaps_used + add; - if ((ruby_gc_stress && !ruby_disable_gc_stress) || - (malloc_increase+size) > malloc_limit) { - garbage_collect_with_gvl(objspace); + if (next_heaps_length > heaps_length) { + allocate_sorted_heaps(objspace, next_heaps_length); + heaps_length = next_heaps_length; } - return size; + for (i = 0; i < add; i++) { + assign_heap_slot(objspace); + } + heaps_inc = 0; } -static inline void * -vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size) +static void +init_heap(rb_objspace_t *objspace) { - ATOMIC_SIZE_ADD(malloc_increase, size); - -#if CALC_EXACT_MALLOC_SIZE - ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size); - ATOMIC_SIZE_INC(objspace->malloc_params.allocations); - ((size_t *)mem)[0] = size; - mem = (size_t *)mem + 1; + add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT); +#ifdef USE_SIGALTSTACK + { + /* altstack of another threads are allocated in another place */ + rb_thread_t *th = GET_THREAD(); + void *tmp = th->altstack; + th->altstack = malloc(ALT_STACK_SIZE); + free(tmp); /* free previously allocated area */ + } #endif - return mem; + objspace->profile.invoke_time = getrusage_time(); + finalizer_table = st_init_numtable(); } -#define TRY_WITH_GC(alloc) do { \ - if (!(alloc) && \ - (!garbage_collect_with_gvl(objspace) || \ - !(alloc))) { \ - ruby_memerror(); \ - } \ - } while (0) - -static void * -vm_xmalloc(rb_objspace_t *objspace, size_t size) +static void +initial_expand_heap(rb_objspace_t *objspace) { - void *mem; + size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT; - size = vm_malloc_prepare(objspace, size); - TRY_WITH_GC(mem = malloc(size)); - return vm_malloc_fixup(objspace, mem, size); + if (min_size > heaps_used) { + add_heap_slots(objspace, min_size - heaps_used); + } } -static void * -vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size) +static void +set_heaps_increment(rb_objspace_t *objspace) { - void *mem; -#if CALC_EXACT_MALLOC_SIZE - size_t oldsize; -#endif + size_t next_heaps_length = (size_t)(heaps_used * 1.8); - if ((ssize_t)size < 0) { - negative_size_allocation_error("negative re-allocation size"); - } - if (!ptr) return vm_xmalloc(objspace, size); - if (size == 0) { - vm_xfree(objspace, ptr); - return 0; + if (next_heaps_length == heaps_used) { + next_heaps_length++; } - if (ruby_gc_stress && !ruby_disable_gc_stress) - garbage_collect_with_gvl(objspace); -#if CALC_EXACT_MALLOC_SIZE - size += sizeof(size_t); - ptr = (size_t *)ptr - 1; - oldsize = ((size_t *)ptr)[0]; -#endif + heaps_inc = next_heaps_length - heaps_used; - mem = realloc(ptr, size); - if (!mem) { - if (garbage_collect_with_gvl(objspace)) { - mem = realloc(ptr, size); - } - if (!mem) { - ruby_memerror(); - } + if (next_heaps_length > heaps_length) { + allocate_sorted_heaps(objspace, next_heaps_length); + heaps_length = next_heaps_length; } - ATOMIC_SIZE_ADD(malloc_increase, size); - -#if CALC_EXACT_MALLOC_SIZE - ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize); - ((size_t *)mem)[0] = size; - mem = (size_t *)mem + 1; -#endif - - return mem; } -static void -vm_xfree(rb_objspace_t *objspace, void *ptr) +static int +heaps_increment(rb_objspace_t *objspace) { -#if CALC_EXACT_MALLOC_SIZE - size_t size; - ptr = ((size_t *)ptr) - 1; - size = ((size_t*)ptr)[0]; - if (size) { - ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size); - ATOMIC_SIZE_DEC(objspace->malloc_params.allocations); + if (heaps_inc > 0) { + assign_heap_slot(objspace); + heaps_inc--; + return TRUE; } 
-#endif + return FALSE; +} - free(ptr); -} - -void * -ruby_xmalloc(size_t size) +VALUE +rb_newobj(void) { - return vm_xmalloc(&rb_objspace, size); -} + rb_objspace_t *objspace = &rb_objspace; + VALUE obj; -static inline size_t -xmalloc2_size(size_t n, size_t size) -{ - size_t len = size * n; - if (n != 0 && size != len / n) { - rb_raise(rb_eArgError, "malloc: possible integer overflow"); + if (UNLIKELY(during_gc)) { + dont_gc = 1; + during_gc = 0; + rb_bug("object allocation during garbage collection phase"); } - return len; -} -void * -ruby_xmalloc2(size_t n, size_t size) -{ - return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size)); + if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) { + if (!garbage_collect(objspace)) { + during_gc = 0; + rb_memerror(); + } + } + + if (UNLIKELY(!has_free_object)) { + if (!gc_lazy_sweep(objspace)) { + during_gc = 0; + rb_memerror(); + } + } + + obj = (VALUE)objspace->heap.free_slots->freelist; + objspace->heap.free_slots->freelist = RANY(obj)->as.free.next; + if (objspace->heap.free_slots->freelist == NULL) { + unlink_free_heap_slot(objspace, objspace->heap.free_slots); + } + + MEMZERO((void*)obj, RVALUE, 1); +#ifdef GC_DEBUG + RANY(obj)->file = rb_sourcefile(); + RANY(obj)->line = rb_sourceline(); +#endif + gc_prof_inc_live_num(objspace); + + return obj; } -static void * -vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize) +NODE* +rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2) { - void *mem; - size_t size; + NODE *n = (NODE*)rb_newobj(); - size = xmalloc2_size(count, elsize); - size = vm_malloc_prepare(objspace, size); + n->flags |= T_NODE; + nd_set_type(n, type); - TRY_WITH_GC(mem = calloc(1, size)); - return vm_malloc_fixup(objspace, mem, size); + n->u1.value = a0; + n->u2.value = a1; + n->u3.value = a2; + + return n; } -void * -ruby_xcalloc(size_t n, size_t size) +VALUE +rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree) { - return vm_xcalloc(&rb_objspace, n, size); + NEWOBJ(data, struct RData); + if (klass) Check_Type(klass, T_CLASS); + OBJSETUP(data, klass, T_DATA); + data->data = datap; + data->dfree = dfree; + data->dmark = dmark; + + return (VALUE)data; } -void * -ruby_xrealloc(void *ptr, size_t size) +VALUE +rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type) { - return vm_xrealloc(&rb_objspace, ptr, size); + NEWOBJ(data, struct RTypedData); + + if (klass) Check_Type(klass, T_CLASS); + + OBJSETUP(data, klass, T_DATA); + + data->data = datap; + data->typed_flag = 1; + data->type = type; + + return (VALUE)data; } -void * -ruby_xrealloc2(void *ptr, size_t n, size_t size) +size_t +rb_objspace_data_type_memsize(VALUE obj) { - size_t len = size * n; - if (n != 0 && size != len / n) { - rb_raise(rb_eArgError, "realloc: possible integer overflow"); + if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) { + return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj)); + } + else { + return 0; } - return ruby_xrealloc(ptr, len); } -void -ruby_xfree(void *x) +const char * +rb_objspace_data_type_name(VALUE obj) { - if (x) - vm_xfree(&rb_objspace, x); + if (RTYPEDDATA_P(obj)) { + return RTYPEDDATA_TYPE(obj)->wrap_struct_name; + } + else { + return 0; + } } +static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev); +static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev); -/* Mimic ruby_xmalloc, but need not rb_objspace. 
- * should return pointer suitable for ruby_xfree - */ -void * -ruby_mimmalloc(size_t size) +static inline int +is_pointer_to_heap(rb_objspace_t *objspace, void *ptr) { - void *mem; -#if CALC_EXACT_MALLOC_SIZE - size += sizeof(size_t); -#endif - mem = malloc(size); -#if CALC_EXACT_MALLOC_SIZE - /* set 0 for consistency of allocated_size/allocations */ - ((size_t *)mem)[0] = 0; - mem = (size_t *)mem + 1; -#endif - return mem; -} - -/* - * call-seq: - * GC.enable -> true or false - * - * Enables garbage collection, returning true if garbage - * collection was previously disabled. - * - * GC.disable #=> false - * GC.enable #=> true - * GC.enable #=> false - * - */ + register RVALUE *p = RANY(ptr); + register struct sorted_heaps_slot *heap; + register size_t hi, lo, mid; -VALUE -rb_gc_enable(void) -{ - rb_objspace_t *objspace = &rb_objspace; - int old = dont_gc; + if (p < lomem || p > himem) return FALSE; + if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE; - dont_gc = FALSE; - return old ? Qtrue : Qfalse; + /* check if p looks like a pointer using bsearch*/ + lo = 0; + hi = heaps_used; + while (lo < hi) { + mid = (lo + hi) / 2; + heap = &objspace->heap.sorted[mid]; + if (heap->start <= p) { + if (p < heap->end) + return TRUE; + lo = mid + 1; + } + else { + hi = mid; + } + } + return FALSE; } -/* - * call-seq: - * GC.disable -> true or false - * - * Disables garbage collection, returning true if garbage - * collection was already disabled. - * - * GC.disable #=> false - * GC.disable #=> true - * - */ - -VALUE -rb_gc_disable(void) +static int +free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data) { - rb_objspace_t *objspace = &rb_objspace; - int old = dont_gc; - - dont_gc = TRUE; - return old ? Qtrue : Qfalse; + if (!me->mark) { + rb_free_method_entry(me); + } + return ST_CONTINUE; } -VALUE rb_mGC; - void -rb_gc_register_mark_object(VALUE obj) +rb_free_m_table(st_table *tbl) { - VALUE ary = GET_THREAD()->vm->mark_object_ary; - rb_ary_push(ary, obj); + st_foreach(tbl, free_method_entry_i, 0); + st_free_table(tbl); } -void -rb_gc_register_address(VALUE *addr) +static int +free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data) { - rb_objspace_t *objspace = &rb_objspace; - struct gc_list *tmp; - - tmp = ALLOC(struct gc_list); - tmp->next = global_List; - tmp->varptr = addr; - global_List = tmp; + xfree(ce); + return ST_CONTINUE; } void -rb_gc_unregister_address(VALUE *addr) +rb_free_const_table(st_table *tbl) { - rb_objspace_t *objspace = &rb_objspace; - struct gc_list *tmp = global_List; + st_foreach(tbl, free_const_entry_i, 0); + st_free_table(tbl); +} - if (tmp->varptr == addr) { - global_List = tmp->next; - xfree(tmp); - return; - } - while (tmp->next) { - if (tmp->next->varptr == addr) { - struct gc_list *t = tmp->next; +static int obj_free(rb_objspace_t *, VALUE); - tmp->next = tmp->next->next; - xfree(t); - break; - } - tmp = tmp->next; - } -} - -static void -allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length) -{ - struct sorted_heaps_slot *p; - struct heaps_free_bitmap *bits; - size_t size, add, i; - - size = next_heaps_length*sizeof(struct sorted_heaps_slot); - add = next_heaps_length - heaps_used; - - if (heaps_used > 0) { - p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size); - if (p) objspace->heap.sorted = p; - } - else { - p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size); - } - - if (p == 0) { - during_gc = 0; - rb_memerror(); - } - - for (i = 0; i < add; i++) { - bits = (struct heaps_free_bitmap 
*)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); - if (bits == 0) { - during_gc = 0; - rb_memerror(); - return; - } - bits->next = objspace->heap.free_bitmap; - objspace->heap.free_bitmap = bits; - } -} - -static void * -aligned_malloc(size_t alignment, size_t size) +static inline struct heaps_slot * +add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p) { - void *res; - -#if defined __MINGW32__ - res = __mingw_aligned_malloc(size, alignment); -#elif defined _WIN32 && !defined __CYGWIN__ - res = _aligned_malloc(size, alignment); -#elif defined(HAVE_POSIX_MEMALIGN) - if (posix_memalign(&res, alignment, size) == 0) { - return res; - } - else { - return NULL; - } -#elif defined(HAVE_MEMALIGN) - res = memalign(alignment, size); -#else - char* aligned; - res = malloc(alignment + size + sizeof(void*)); - aligned = (char*)res + alignment + sizeof(void*); - aligned -= ((VALUE)aligned & (alignment - 1)); - ((void**)aligned)[-1] = res; - res = (void*)aligned; -#endif - -#if defined(_DEBUG) || defined(GC_DEBUG) - /* alignment must be a power of 2 */ - assert((alignment - 1) & alignment == 0); - assert(alignment % sizeof(void*) == 0); -#endif - return res; -} + struct heaps_slot *slot; -static void -aligned_free(void *ptr) -{ -#if defined __MINGW32__ - __mingw_aligned_free(ptr); -#elif defined _WIN32 && !defined __CYGWIN__ - _aligned_free(ptr); -#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN) - free(ptr); -#else - free(((void**)ptr)[-1]); -#endif -} + VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); + p->as.free.flags = 0; + slot = GET_HEAP_SLOT(p); + p->as.free.next = slot->freelist; + slot->freelist = p; -static void -link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) -{ - slot->free_next = objspace->heap.free_slots; - objspace->heap.free_slots = slot; + return slot; } static void -unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) +unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) { - objspace->heap.free_slots = slot->free_next; - slot->free_next = NULL; + if (slot->prev) + slot->prev->next = slot->next; + if (slot->next) + slot->next->prev = slot->prev; + if (heaps == slot) + heaps = slot->next; + if (objspace->heap.sweep_slots == slot) + objspace->heap.sweep_slots = slot->next; + slot->prev = NULL; + slot->next = NULL; } static void -assign_heap_slot(rb_objspace_t *objspace) +free_unused_heaps(rb_objspace_t *objspace) { - RVALUE *p, *pend, *membase; - struct heaps_slot *slot; - size_t hi, lo, mid; - size_t objs; - - objs = HEAP_OBJ_LIMIT; - p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE); - if (p == 0) { - during_gc = 0; - rb_memerror(); - } - slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot)); - if (slot == 0) { - aligned_free(p); - during_gc = 0; - rb_memerror(); - } - MEMZERO((void*)slot, struct heaps_slot, 1); - - slot->next = heaps; - if (heaps) heaps->prev = slot; - heaps = slot; - - membase = p; - p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header)); - if ((VALUE)p % sizeof(RVALUE) != 0) { - p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE))); - objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE); - } + size_t i, j; + RVALUE *last = 0; - lo = 0; - hi = heaps_used; - while (lo < hi) { - register RVALUE *mid_membase; - mid = (lo + hi) / 2; - mid_membase = objspace->heap.sorted[mid].slot->membase; - if (mid_membase < membase) { - lo = mid + 1; - } - else if (mid_membase > membase) { - hi = mid; + for (i = j = 1; j < heaps_used; i++) { + if 
(objspace->heap.sorted[i].slot->limit == 0) { + struct heaps_slot* h = objspace->heap.sorted[i].slot; + ((struct heaps_free_bitmap *)(h->bits))->next = + objspace->heap.free_bitmap; + objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits; + if (!last) { + last = objspace->heap.sorted[i].slot->membase; + } + else { + aligned_free(objspace->heap.sorted[i].slot->membase); + } + free(objspace->heap.sorted[i].slot); + heaps_used--; } else { - rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid); + if (i != j) { + objspace->heap.sorted[j] = objspace->heap.sorted[i]; + } + j++; } } - if (hi < heaps_used) { - MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi); - } - objspace->heap.sorted[hi].slot = slot; - objspace->heap.sorted[hi].start = p; - objspace->heap.sorted[hi].end = (p + objs); - heaps->membase = membase; - heaps->slot = p; - heaps->limit = objs; - assert(objspace->heap.free_bitmap != NULL); - heaps->bits = (uintptr_t *)objspace->heap.free_bitmap; - objspace->heap.free_bitmap = objspace->heap.free_bitmap->next; - HEAP_HEADER(membase)->base = heaps; - HEAP_HEADER(membase)->bits = heaps->bits; - memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); - objspace->heap.free_num += objs; - pend = p + objs; - if (lomem == 0 || lomem > p) lomem = p; - if (himem < pend) himem = pend; - heaps_used++; - - while (p < pend) { - p->as.free.flags = 0; - p->as.free.next = heaps->freelist; - heaps->freelist = p; - p++; + if (last) { + if (last < heaps_freed) { + aligned_free(heaps_freed); + heaps_freed = last; + } + else { + aligned_free(last); + } } - link_free_heap_slot(objspace, heaps); } - -static void -add_heap_slots(rb_objspace_t *objspace, size_t add) +static inline void +make_deferred(RVALUE *p) { - size_t i; - size_t next_heaps_length; - - next_heaps_length = heaps_used + add; - - if (next_heaps_length > heaps_length) { - allocate_sorted_heaps(objspace, next_heaps_length); - heaps_length = next_heaps_length; - } - - for (i = 0; i < add; i++) { - assign_heap_slot(objspace); - } - heaps_inc = 0; + p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE; } -static void -init_heap(rb_objspace_t *objspace) +static inline void +make_io_deferred(RVALUE *p) { - add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT); -#ifdef USE_SIGALTSTACK - { - /* altstack of another threads are allocated in another place */ - rb_thread_t *th = GET_THREAD(); - void *tmp = th->altstack; - th->altstack = malloc(ALT_STACK_SIZE); - free(tmp); /* free previously allocated area */ - } -#endif - - objspace->profile.invoke_time = getrusage_time(); - finalizer_table = st_init_numtable(); + rb_io_t *fptr = p->as.file.fptr; + make_deferred(p); + p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize; + p->as.data.data = fptr; } -static void -initial_expand_heap(rb_objspace_t *objspace) +static int +obj_free(rb_objspace_t *objspace, VALUE obj) { - size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT; + switch (BUILTIN_TYPE(obj)) { + case T_NIL: + case T_FIXNUM: + case T_TRUE: + case T_FALSE: + rb_bug("obj_free() called for broken object"); + break; + } - if (min_size > heaps_used) { - add_heap_slots(objspace, min_size - heaps_used); + if (FL_TEST(obj, FL_EXIVAR)) { + rb_free_generic_ivar((VALUE)obj); + FL_UNSET(obj, FL_EXIVAR); } -} -static void -set_heaps_increment(rb_objspace_t *objspace) -{ - size_t next_heaps_length = (size_t)(heaps_used * 1.8); + switch (BUILTIN_TYPE(obj)) { + case T_OBJECT: + if 
(!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) && + RANY(obj)->as.object.as.heap.ivptr) { + xfree(RANY(obj)->as.object.as.heap.ivptr); + } + break; + case T_MODULE: + case T_CLASS: + rb_clear_cache_by_class((VALUE)obj); + if (RCLASS_M_TBL(obj)) { + rb_free_m_table(RCLASS_M_TBL(obj)); + } + if (RCLASS_IV_TBL(obj)) { + st_free_table(RCLASS_IV_TBL(obj)); + } + if (RCLASS_CONST_TBL(obj)) { + rb_free_const_table(RCLASS_CONST_TBL(obj)); + } + if (RCLASS_IV_INDEX_TBL(obj)) { + st_free_table(RCLASS_IV_INDEX_TBL(obj)); + } + xfree(RANY(obj)->as.klass.ptr); + break; + case T_STRING: + rb_str_free(obj); + break; + case T_ARRAY: + rb_ary_free(obj); + break; + case T_HASH: + if (RANY(obj)->as.hash.ntbl) { + st_free_table(RANY(obj)->as.hash.ntbl); + } + break; + case T_REGEXP: + if (RANY(obj)->as.regexp.ptr) { + onig_free(RANY(obj)->as.regexp.ptr); + } + break; + case T_DATA: + if (DATA_PTR(obj)) { + if (RTYPEDDATA_P(obj)) { + RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree; + } + if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) { + xfree(DATA_PTR(obj)); + } + else if (RANY(obj)->as.data.dfree) { + make_deferred(RANY(obj)); + return 1; + } + } + break; + case T_MATCH: + if (RANY(obj)->as.match.rmatch) { + struct rmatch *rm = RANY(obj)->as.match.rmatch; + onig_region_free(&rm->regs, 0); + if (rm->char_offset) + xfree(rm->char_offset); + xfree(rm); + } + break; + case T_FILE: + if (RANY(obj)->as.file.fptr) { + make_io_deferred(RANY(obj)); + return 1; + } + break; + case T_RATIONAL: + case T_COMPLEX: + break; + case T_ICLASS: + /* iClass shares table with the module */ + xfree(RANY(obj)->as.klass.ptr); + break; - if (next_heaps_length == heaps_used) { - next_heaps_length++; - } + case T_FLOAT: + break; - heaps_inc = next_heaps_length - heaps_used; + case T_BIGNUM: + if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) { + xfree(RBIGNUM_DIGITS(obj)); + } + break; + case T_NODE: + switch (nd_type(obj)) { + case NODE_SCOPE: + if (RANY(obj)->as.node.u1.tbl) { + xfree(RANY(obj)->as.node.u1.tbl); + } + break; + case NODE_ARGS: + if (RANY(obj)->as.node.u3.args) { + xfree(RANY(obj)->as.node.u3.args); + } + break; + case NODE_ALLOCA: + xfree(RANY(obj)->as.node.u1.node); + break; + } + break; /* no need to free iv_tbl */ - if (next_heaps_length > heaps_length) { - allocate_sorted_heaps(objspace, next_heaps_length); - heaps_length = next_heaps_length; - } -} + case T_STRUCT: + if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 && + RANY(obj)->as.rstruct.as.heap.ptr) { + xfree(RANY(obj)->as.rstruct.as.heap.ptr); + } + break; -static int -heaps_increment(rb_objspace_t *objspace) -{ - if (heaps_inc > 0) { - assign_heap_slot(objspace); - heaps_inc--; - return TRUE; + default: + rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE, + BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags); } - return FALSE; + + return 0; } -int -rb_during_gc(void) +void +Init_heap(void) { - rb_objspace_t *objspace = &rb_objspace; - return during_gc; + init_heap(&rb_objspace); } -#define RANY(o) ((RVALUE*)(o)) -#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist) +typedef int each_obj_callback(void *, void *, size_t, void *); -VALUE -rb_newobj(void) +struct each_obj_args { + each_obj_callback *callback; + void *data; +}; + +static VALUE +objspace_each_objects(VALUE arg) { + size_t i; + RVALUE *membase = 0; + RVALUE *pstart, *pend; rb_objspace_t *objspace = &rb_objspace; - VALUE obj; + struct each_obj_args *args = (struct each_obj_args *)arg; + volatile VALUE 
v; - if (UNLIKELY(during_gc)) { - dont_gc = 1; - during_gc = 0; - rb_bug("object allocation during garbage collection phase"); - } + i = 0; + while (i < heaps_used) { + while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase) + i--; + while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase) + i++; + if (heaps_used <= i) + break; + membase = objspace->heap.sorted[i].slot->membase; - if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) { - if (!garbage_collect(objspace)) { - during_gc = 0; - rb_memerror(); - } - } + pstart = objspace->heap.sorted[i].slot->slot; + pend = pstart + objspace->heap.sorted[i].slot->limit; - if (UNLIKELY(!has_free_object)) { - if (!gc_lazy_sweep(objspace)) { - during_gc = 0; - rb_memerror(); + for (; pstart != pend; pstart++) { + if (pstart->as.basic.flags) { + v = (VALUE)pstart; /* acquire to save this object */ + break; + } + } + if (pstart != pend) { + if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) { + break; + } } } + RB_GC_GUARD(v); - obj = (VALUE)objspace->heap.free_slots->freelist; - objspace->heap.free_slots->freelist = RANY(obj)->as.free.next; - if (objspace->heap.free_slots->freelist == NULL) { - unlink_free_heap_slot(objspace, objspace->heap.free_slots); - } - - MEMZERO((void*)obj, RVALUE, 1); -#ifdef GC_DEBUG - RANY(obj)->file = rb_sourcefile(); - RANY(obj)->line = rb_sourceline(); -#endif - gc_prof_inc_live_num(objspace); - - return obj; -} - -NODE* -rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2) -{ - NODE *n = (NODE*)rb_newobj(); - - n->flags |= T_NODE; - nd_set_type(n, type); - - n->u1.value = a0; - n->u2.value = a1; - n->u3.value = a2; - - return n; -} - -VALUE -rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree) -{ - NEWOBJ(data, struct RData); - if (klass) Check_Type(klass, T_CLASS); - OBJSETUP(data, klass, T_DATA); - data->data = datap; - data->dfree = dfree; - data->dmark = dmark; - - return (VALUE)data; + return Qnil; } -VALUE -rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type) -{ - NEWOBJ(data, struct RTypedData); - - if (klass) Check_Type(klass, T_CLASS); - - OBJSETUP(data, klass, T_DATA); +/* + * rb_objspace_each_objects() is special C API to walk through + * Ruby object space. This C API is too difficult to use it. + * To be frank, you should not use it. Or you need to read the + * source code of this function and understand what this function does. + * + * 'callback' will be called several times (the number of heap slot, + * at current implementation) with: + * vstart: a pointer to the first living object of the heap_slot. + * vend: a pointer to next to the valid heap_slot area. + * stride: a distance to next VALUE. + * + * If callback() returns non-zero, the iteration will be stopped. + * + * This is a sample callback code to iterate liveness objects: + * + * int + * sample_callback(void *vstart, void *vend, int stride, void *data) { + * VALUE v = (VALUE)vstart; + * for (; v != (VALUE)vend; v += stride) { + * if (RBASIC(v)->flags) { // liveness check + * // do something with live object 'v' + * } + * return 0; // continue to iteration + * } + * + * Note: 'vstart' is not a top of heap_slot. This point the first + * living object to grasp at least one object to avoid GC issue. + * This means that you can not walk through all Ruby object slot + * including freed object slot. + * + * Note: On this implementation, 'stride' is same as sizeof(RVALUE). 
+ * However, there are possibilities to pass variable values with + * 'stride' with some reasons. You must use stride instead of + * use some constant value in the iteration. + */ +void +rb_objspace_each_objects(each_obj_callback *callback, void *data) +{ + struct each_obj_args args; + rb_objspace_t *objspace = &rb_objspace; - data->data = datap; - data->typed_flag = 1; - data->type = type; + rest_sweep(objspace); + objspace->flags.dont_lazy_sweep = TRUE; - return (VALUE)data; + args.callback = callback; + args.data = data; + rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil); } -size_t -rb_objspace_data_type_memsize(VALUE obj) -{ - if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) { - return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj)); - } - else { - return 0; - } -} +struct os_each_struct { + size_t num; + VALUE of; +}; -const char * -rb_objspace_data_type_name(VALUE obj) +static int +os_obj_of_i(void *vstart, void *vend, size_t stride, void *data) { - if (RTYPEDDATA_P(obj)) { - return RTYPEDDATA_TYPE(obj)->wrap_struct_name; - } - else { - return 0; - } -} + struct os_each_struct *oes = (struct os_each_struct *)data; + RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend; + volatile VALUE v; -#ifdef __ia64 -#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp()) -#else -#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end) -#endif + for (; p != pend; p++) { + if (p->as.basic.flags) { + switch (BUILTIN_TYPE(p)) { + case T_NONE: + case T_ICLASS: + case T_NODE: + case T_ZOMBIE: + continue; + case T_CLASS: + if (FL_TEST(p, FL_SINGLETON)) + continue; + default: + if (!p->as.basic.klass) continue; + v = (VALUE)p; + if (!oes->of || rb_obj_is_kind_of(v, oes->of)) { + rb_yield(v); + oes->num++; + } + } + } + } -#define STACK_START (th->machine_stack_start) -#define STACK_END (th->machine_stack_end) -#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE)) + return 0; +} -#if STACK_GROW_DIRECTION < 0 -# define STACK_LENGTH (size_t)(STACK_START - STACK_END) -#elif STACK_GROW_DIRECTION > 0 -# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1) -#else -# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \ - : (size_t)(STACK_END - STACK_START + 1)) -#endif -#if !STACK_GROW_DIRECTION -int ruby_stack_grow_direction; -int -ruby_get_stack_grow_direction(volatile VALUE *addr) +static VALUE +os_obj_of(VALUE of) { - VALUE *end; - SET_MACHINE_STACK_END(&end); + struct os_each_struct oes; - if (end > addr) return ruby_stack_grow_direction = 1; - return ruby_stack_grow_direction = -1; + oes.num = 0; + oes.of = of; + rb_objspace_each_objects(os_obj_of_i, &oes); + return SIZET2NUM(oes.num); } -#endif -#define GC_LEVEL_MAX 250 -#define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD) +/* + * call-seq: + * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum + * ObjectSpace.each_object([module]) -> an_enumerator + * + * Calls the block once for each living, nonimmediate object in this + * Ruby process. If module is specified, calls the block + * for only those classes or modules that match (or are a subclass of) + * module. Returns the number of objects found. Immediate + * objects (Fixnums, Symbols + * true, false, and nil) are + * never returned. In the example below, each_object + * returns both the numbers we defined and several constants defined in + * the Math module. 
+ * + * If no block is given, an enumerator is returned instead. + * + * a = 102.7 + * b = 95 # Won't be returned + * c = 12345678987654321 + * count = ObjectSpace.each_object(Numeric) {|x| p x } + * puts "Total count: #{count}" + * + * produces: + * + * 12345678987654321 + * 102.7 + * 2.71828182845905 + * 3.14159265358979 + * 2.22044604925031e-16 + * 1.7976931348623157e+308 + * 2.2250738585072e-308 + * Total count: 7 + * + */ -size_t -ruby_stack_length(VALUE **p) +static VALUE +os_each_obj(int argc, VALUE *argv, VALUE os) { - rb_thread_t *th = GET_THREAD(); - SET_STACK_END; - if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END); - return STACK_LENGTH; -} + VALUE of; -static int -stack_check(int water_mark) -{ - int ret; - rb_thread_t *th = GET_THREAD(); - SET_STACK_END; - ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark; -#ifdef __ia64 - if (!ret) { - ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start > - th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark; + rb_secure(4); + if (argc == 0) { + of = 0; } -#endif - return ret; + else { + rb_scan_args(argc, argv, "01", &of); + } + RETURN_ENUMERATOR(os, 1, &of); + return os_obj_of(of); } -#define STACKFRAME_FOR_CALL_CFUNC 512 +/* + * call-seq: + * ObjectSpace.undefine_finalizer(obj) + * + * Removes all finalizers for obj. + * + */ -int -ruby_stack_check(void) +static VALUE +undefine_final(VALUE os, VALUE obj) { -#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) - return 0; -#else - return stack_check(STACKFRAME_FOR_CALL_CFUNC); -#endif + return rb_undefine_final(obj); } -static void -init_mark_stack(rb_objspace_t *objspace) +VALUE +rb_undefine_final(VALUE obj) { - mark_stack_overflow = 0; - mark_stack_ptr = mark_stack; + rb_objspace_t *objspace = &rb_objspace; + st_data_t data = obj; + rb_check_frozen(obj); + st_delete(finalizer_table, &data, 0); + FL_UNSET(obj, FL_FINALIZE); + return obj; } -#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack) - -static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev); -static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev); +/* + * call-seq: + * ObjectSpace.define_finalizer(obj, aProc=proc()) + * + * Adds aProc as a finalizer, to be called after obj + * was destroyed. 
+ * + */ -static void -gc_mark_all(rb_objspace_t *objspace) +static VALUE +define_final(int argc, VALUE *argv, VALUE os) { - RVALUE *p, *pend; - size_t i; + VALUE obj, block; - init_mark_stack(objspace); - for (i = 0; i < heaps_used; i++) { - p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; - while (p < pend) { - if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p) && - p->as.basic.flags) { - gc_mark_children(objspace, (VALUE)p, 0); - } - p++; - } + rb_scan_args(argc, argv, "11", &obj, &block); + rb_check_frozen(obj); + if (argc == 1) { + block = rb_block_proc(); + } + else if (!rb_respond_to(block, rb_intern("call"))) { + rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", + rb_obj_classname(block)); } + return define_final0(obj, block); } -static void -gc_mark_rest(rb_objspace_t *objspace) +static VALUE +define_final0(VALUE obj, VALUE block) { - VALUE tmp_arry[MARK_STACK_MAX]; - VALUE *p; - - p = (mark_stack_ptr - mark_stack) + tmp_arry; - MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry); + rb_objspace_t *objspace = &rb_objspace; + VALUE table; + st_data_t data; - init_mark_stack(objspace); - while (p != tmp_arry) { - p--; - gc_mark_children(objspace, *p, 0); + if (!FL_ABLE(obj)) { + rb_raise(rb_eArgError, "cannot define finalizer for %s", + rb_obj_classname(obj)); } -} - -static inline int -is_pointer_to_heap(rb_objspace_t *objspace, void *ptr) -{ - register RVALUE *p = RANY(ptr); - register struct sorted_heaps_slot *heap; - register size_t hi, lo, mid; + RBASIC(obj)->flags |= FL_FINALIZE; - if (p < lomem || p > himem) return FALSE; - if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE; + block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block); + OBJ_FREEZE(block); - /* check if p looks like a pointer using bsearch*/ - lo = 0; - hi = heaps_used; - while (lo < hi) { - mid = (lo + hi) / 2; - heap = &objspace->heap.sorted[mid]; - if (heap->start <= p) { - if (p < heap->end) - return TRUE; - lo = mid + 1; - } - else { - hi = mid; - } + if (st_lookup(finalizer_table, obj, &data)) { + table = (VALUE)data; + rb_ary_push(table, block); } - return FALSE; + else { + table = rb_ary_new3(1, block); + RBASIC(table)->klass = 0; + st_add_direct(finalizer_table, obj, table); + } + return block; } -static void -mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n) +VALUE +rb_define_final(VALUE obj, VALUE block) { - VALUE v; - while (n--) { - v = *x; - VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)); - if (is_pointer_to_heap(objspace, (void *)v)) { - gc_mark(objspace, v, 0); - } - x++; + rb_check_frozen(obj); + if (!rb_respond_to(block, rb_intern("call"))) { + rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", + rb_obj_classname(block)); } + return define_final0(obj, block); } -static void -gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end) +void +rb_gc_copy_finalizer(VALUE dest, VALUE obj) { - long n; + rb_objspace_t *objspace = &rb_objspace; + VALUE table; + st_data_t data; - if (end <= start) return; - n = end - start; - mark_locations_array(objspace, start, n); + if (!FL_TEST(obj, FL_FINALIZE)) return; + if (st_lookup(finalizer_table, obj, &data)) { + table = (VALUE)data; + st_insert(finalizer_table, dest, table); + } + FL_SET(dest, FL_FINALIZE); } -void -rb_gc_mark_locations(VALUE *start, VALUE *end) +static VALUE +run_single_final(VALUE arg) { - gc_mark_locations(&rb_objspace, start, end); + VALUE *args = (VALUE *)arg; + rb_eval_cmd(args[0], args[1], (int)args[2]); + return Qnil; } -#define 
rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end)) +static void +run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table) +{ + long i; + int status; + VALUE args[3]; + VALUE objid = nonspecial_obj_id(obj); -struct mark_tbl_arg { - rb_objspace_t *objspace; - int lev; -}; + if (RARRAY_LEN(table) > 0) { + args[1] = rb_obj_freeze(rb_ary_new3(1, objid)); + } + else { + args[1] = 0; + } -static int -mark_entry(st_data_t key, st_data_t value, st_data_t data) -{ - struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, (VALUE)value, arg->lev); - return ST_CONTINUE; + args[2] = (VALUE)rb_safe_level(); + for (i=0; inum_entries == 0) return; - arg.objspace = objspace; - arg.lev = lev; - st_foreach(tbl, mark_entry, (st_data_t)&arg); + RUBY_DATA_FUNC free_func = 0; + st_data_t key, table; + + objspace->heap.final_num--; + + RBASIC(obj)->klass = 0; + + if (RTYPEDDATA_P(obj)) { + free_func = RTYPEDDATA_TYPE(obj)->function.dfree; + } + else { + free_func = RDATA(obj)->dfree; + } + if (free_func) { + (*free_func)(DATA_PTR(obj)); + } + + key = (st_data_t)obj; + if (st_delete(finalizer_table, &key, &table)) { + run_finalizer(objspace, obj, (VALUE)table); + } } -static int -mark_key(st_data_t key, st_data_t value, st_data_t data) +static void +finalize_list(rb_objspace_t *objspace, RVALUE *p) { - struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, (VALUE)key, arg->lev); - return ST_CONTINUE; + while (p) { + RVALUE *tmp = p->as.free.next; + run_final(objspace, (VALUE)p); + if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */ + add_slot_local_freelist(objspace, p); + if (!is_lazy_sweeping(objspace)) { + gc_prof_dec_live_num(objspace); + } + } + else { + struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark; + slot->limit--; + } + p = tmp; + } } static void -mark_set(rb_objspace_t *objspace, st_table *tbl, int lev) +finalize_deferred(rb_objspace_t *objspace) { - struct mark_tbl_arg arg; - if (!tbl) return; - arg.objspace = objspace; - arg.lev = lev; - st_foreach(tbl, mark_key, (st_data_t)&arg); + RVALUE *p = deferred_final_list; + deferred_final_list = 0; + + if (p) { + finalize_list(objspace, p); + } } void -rb_mark_set(st_table *tbl) +rb_gc_finalize_deferred(void) { - mark_set(&rb_objspace, tbl, 0); + rb_objspace_t *objspace = &rb_objspace; + if (ATOMIC_EXCHANGE(finalizing, 1)) return; + finalize_deferred(objspace); + ATOMIC_SET(finalizing, 0); } static int -mark_keyvalue(st_data_t key, st_data_t value, st_data_t data) +chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg) { - struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, (VALUE)key, arg->lev); - gc_mark(arg->objspace, (VALUE)value, arg->lev); + RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg; + if ((p->as.basic.flags & FL_FINALIZE) == FL_FINALIZE && + !MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) { + if (BUILTIN_TYPE(p) != T_ZOMBIE) { + p->as.free.flags = T_ZOMBIE; + RDATA(p)->dfree = 0; + } + p->as.free.next = *final_list; + *final_list = p; + } return ST_CONTINUE; } -static void -mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev) +struct force_finalize_list { + VALUE obj; + VALUE table; + struct force_finalize_list *next; +}; + +static int +force_chain_object(st_data_t key, st_data_t val, st_data_t arg) { - struct mark_tbl_arg arg; - if (!tbl) return; - arg.objspace = objspace; - arg.lev = lev; - st_foreach(tbl, mark_keyvalue, (st_data_t)&arg); + struct force_finalize_list **prev = (struct force_finalize_list **)arg; + struct 
force_finalize_list *curr = ALLOC(struct force_finalize_list); + curr->obj = key; + curr->table = val; + curr->next = *prev; + *prev = curr; + return ST_CONTINUE; } void -rb_mark_hash(st_table *tbl) +rb_gc_call_finalizer_at_exit(void) { - mark_hash(&rb_objspace, tbl, 0); + rb_objspace_call_finalizer(&rb_objspace); } static void -mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev) +rb_objspace_call_finalizer(rb_objspace_t *objspace) { - const rb_method_definition_t *def = me->def; + RVALUE *p, *pend; + RVALUE *final_list = 0; + size_t i; - gc_mark(objspace, me->klass, lev); - if (!def) return; - switch (def->type) { - case VM_METHOD_TYPE_ISEQ: - gc_mark(objspace, def->body.iseq->self, lev); - break; - case VM_METHOD_TYPE_BMETHOD: - gc_mark(objspace, def->body.proc, lev); - break; - case VM_METHOD_TYPE_ATTRSET: - case VM_METHOD_TYPE_IVAR: - gc_mark(objspace, def->body.attr.location, lev); - break; - default: - break; /* ignore */ - } -} + /* run finalizers */ + rest_sweep(objspace); -void -rb_mark_method_entry(const rb_method_entry_t *me) -{ - mark_method_entry(&rb_objspace, me, 0); -} + if (ATOMIC_EXCHANGE(finalizing, 1)) return; -static int -mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data) -{ - struct mark_tbl_arg *arg = (void*)data; - mark_method_entry(arg->objspace, me, arg->lev); - return ST_CONTINUE; -} + do { + /* XXX: this loop will make no sense */ + /* because mark will not be removed */ + finalize_deferred(objspace); + mark_tbl(objspace, finalizer_table, 0); + st_foreach(finalizer_table, chain_finalized_object, + (st_data_t)&deferred_final_list); + } while (deferred_final_list); + /* force to run finalizer */ + while (finalizer_table->num_entries) { + struct force_finalize_list *list = 0; + st_foreach(finalizer_table, force_chain_object, (st_data_t)&list); + while (list) { + struct force_finalize_list *curr = list; + st_data_t obj = (st_data_t)curr->obj; + run_finalizer(objspace, curr->obj, curr->table); + st_delete(finalizer_table, &obj, 0); + list = curr->next; + xfree(curr); + } + } -static void -mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) -{ - struct mark_tbl_arg arg; - if (!tbl) return; - arg.objspace = objspace; - arg.lev = lev; - st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg); -} + /* finalizers are part of garbage collection */ + during_gc++; -static int -free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data) -{ - if (!me->mark) { - rb_free_method_entry(me); + /* run data object's finalizers */ + for (i = 0; i < heaps_used; i++) { + p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; + while (p < pend) { + if (BUILTIN_TYPE(p) == T_DATA && + DATA_PTR(p) && RANY(p)->as.data.dfree && + !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) && + !rb_obj_is_fiber((VALUE)p)) { + p->as.free.flags = 0; + if (RTYPEDDATA_P(p)) { + RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree; + } + if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) { + xfree(DATA_PTR(p)); + } + else if (RANY(p)->as.data.dfree) { + make_deferred(RANY(p)); + RANY(p)->as.free.next = final_list; + final_list = p; + } + } + else if (BUILTIN_TYPE(p) == T_FILE) { + if (RANY(p)->as.file.fptr) { + make_io_deferred(RANY(p)); + RANY(p)->as.free.next = final_list; + final_list = p; + } + } + p++; + } + } + during_gc = 0; + if (final_list) { + finalize_list(objspace, final_list); } - return ST_CONTINUE; -} - -void -rb_free_m_table(st_table *tbl) -{ - st_foreach(tbl, free_method_entry_i, 0); - 
st_free_table(tbl); -} -static int -mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data) -{ - struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, ce->value, arg->lev); - gc_mark(arg->objspace, ce->file, arg->lev); - return ST_CONTINUE; + st_free_table(finalizer_table); + finalizer_table = 0; + ATOMIC_SET(finalizing, 0); } -static void -mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +static inline int +is_id_value(rb_objspace_t *objspace, VALUE ptr) { - struct mark_tbl_arg arg; - if (!tbl) return; - arg.objspace = objspace; - arg.lev = lev; - st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg); + if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE; + if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE; + if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE; + return TRUE; } -static int -free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data) +static inline int +is_dead_object(rb_objspace_t *objspace, VALUE ptr) { - xfree(ce); - return ST_CONTINUE; + struct heaps_slot *slot = objspace->heap.sweep_slots; + if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr)) + return FALSE; + while (slot) { + if ((VALUE)slot->slot <= ptr && ptr < (VALUE)(slot->slot + slot->limit)) + return TRUE; + slot = slot->next; + } + return FALSE; } -void -rb_free_const_table(st_table *tbl) +static inline int +is_live_object(rb_objspace_t *objspace, VALUE ptr) { - st_foreach(tbl, free_const_entry_i, 0); - st_free_table(tbl); + if (BUILTIN_TYPE(ptr) == 0) return FALSE; + if (RBASIC(ptr)->klass == 0) return FALSE; + if (is_dead_object(objspace, ptr)) return FALSE; + return TRUE; } -void -rb_mark_tbl(st_table *tbl) -{ - mark_tbl(&rb_objspace, tbl, 0); -} +/* + * call-seq: + * ObjectSpace._id2ref(object_id) -> an_object + * + * Converts an object id to a reference to the object. May not be + * called on an object id passed as a parameter to a finalizer. 
+ * + * s = "I am a string" #=> "I am a string" + * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string" + * r == s #=> true + * + */ -void -rb_gc_mark_maybe(VALUE obj) +static VALUE +id2ref(VALUE obj, VALUE objid) { - if (is_pointer_to_heap(&rb_objspace, (void *)obj)) { - gc_mark(&rb_objspace, obj, 0); - } -} +#if SIZEOF_LONG == SIZEOF_VOIDP +#define NUM2PTR(x) NUM2ULONG(x) +#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP +#define NUM2PTR(x) NUM2ULL(x) +#endif + rb_objspace_t *objspace = &rb_objspace; + VALUE ptr; + void *p0; -static int -gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr) -{ - register uintptr_t *bits = GET_HEAP_BITMAP(ptr); - if (MARKED_IN_BITMAP(bits, ptr)) return 0; - MARK_IN_BITMAP(bits, ptr); - objspace->heap.live_num++; - return 1; -} + rb_secure(4); + ptr = NUM2PTR(objid); + p0 = (void *)ptr; -static void -gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) -{ - register RVALUE *obj; + if (ptr == Qtrue) return Qtrue; + if (ptr == Qfalse) return Qfalse; + if (ptr == Qnil) return Qnil; + if (FIXNUM_P(ptr)) return (VALUE)ptr; + ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ - obj = RANY(ptr); - if (rb_special_const_p(ptr)) return; /* special const not marked */ - if (obj->as.basic.flags == 0) return; /* free cell */ - if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */ + if ((ptr % sizeof(RVALUE)) == (4 << 2)) { + ID symid = ptr / sizeof(RVALUE); + if (rb_id2name(symid) == 0) + rb_raise(rb_eRangeError, "%p is not symbol id value", p0); + return ID2SYM(symid); + } - if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) { - if (!mark_stack_overflow) { - if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { - *mark_stack_ptr = ptr; - mark_stack_ptr++; - } - else { - mark_stack_overflow = 1; - } - } - return; + if (!is_id_value(objspace, ptr)) { + rb_raise(rb_eRangeError, "%p is not id value", p0); } - gc_mark_children(objspace, ptr, lev+1); + if (!is_live_object(objspace, ptr)) { + rb_raise(rb_eRangeError, "%p is recycled object", p0); + } + return (VALUE)ptr; } -void -rb_gc_mark(VALUE ptr) -{ - gc_mark(&rb_objspace, ptr, 0); -} +/* + * Document-method: __id__ + * Document-method: object_id + * + * call-seq: + * obj.__id__ -> fixnum + * obj.object_id -> fixnum + * + * Returns an integer identifier for obj. The same number will + * be returned on all calls to id for a given object, and + * no two active objects will share an id. + * Object#object_id is a different concept from the + * :name notation, which returns the symbol id of + * name. Replaces the deprecated Object#id. + */ + +/* + * call-seq: + * obj.hash -> fixnum + * + * Generates a Fixnum hash value for this object. This + * function must have the property that a.eql?(b) implies + * a.hash == b.hash. The hash value is used by class + * Hash. Any hash value that exceeds the capacity of a + * Fixnum will be truncated before being used. 
+ */ + +VALUE +rb_obj_id(VALUE obj) +{ + /* + * 32-bit VALUE space + * MSB ------------------------ LSB + * false 00000000000000000000000000000000 + * true 00000000000000000000000000000010 + * nil 00000000000000000000000000000100 + * undef 00000000000000000000000000000110 + * symbol ssssssssssssssssssssssss00001110 + * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE)) + * fixnum fffffffffffffffffffffffffffffff1 + * + * object_id space + * LSB + * false 00000000000000000000000000000000 + * true 00000000000000000000000000000010 + * nil 00000000000000000000000000000100 + * undef 00000000000000000000000000000110 + * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4) + * object oooooooooooooooooooooooooooooo0 o...o % A = 0 + * fixnum fffffffffffffffffffffffffffffff1 bignum if required + * + * where A = sizeof(RVALUE)/4 + * + * sizeof(RVALUE) is + * 20 if 32-bit, double is 4-byte aligned + * 24 if 32-bit, double is 8-byte aligned + * 40 if 64-bit + */ + if (SYMBOL_P(obj)) { + return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG; + } + if (SPECIAL_CONST_P(obj)) { + return LONG2NUM((SIGNED_VALUE)obj); + } + return nonspecial_obj_id(obj); +} + +static int +set_zero(st_data_t key, st_data_t val, st_data_t arg) +{ + VALUE k = (VALUE)key; + VALUE hash = (VALUE)arg; + rb_hash_aset(hash, k, INT2FIX(0)); + return ST_CONTINUE; +} + +/* + * call-seq: + * ObjectSpace.count_objects([result_hash]) -> hash + * + * Counts objects for each type. + * + * It returns a hash as: + * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...} + * + * If the optional argument, result_hash, is given, + * it is overwritten and returned. + * This is intended to avoid probe effect. + * + * The contents of the returned hash is implementation defined. + * It may be changed in future. + * + * This method is not expected to work except C Ruby. 
+ * + */ + +static VALUE +count_objects(int argc, VALUE *argv, VALUE os) +{ + rb_objspace_t *objspace = &rb_objspace; + size_t counts[T_MASK+1]; + size_t freed = 0; + size_t total = 0; + size_t i; + VALUE hash; + + if (rb_scan_args(argc, argv, "01", &hash) == 1) { + if (!RB_TYPE_P(hash, T_HASH)) + rb_raise(rb_eTypeError, "non-hash given"); + } + + for (i = 0; i <= T_MASK; i++) { + counts[i] = 0; + } + + for (i = 0; i < heaps_used; i++) { + RVALUE *p, *pend; + + p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; + for (;p < pend; p++) { + if (p->as.basic.flags) { + counts[BUILTIN_TYPE(p)]++; + } + else { + freed++; + } + } + total += objspace->heap.sorted[i].slot->limit; + } + + if (hash == Qnil) { + hash = rb_hash_new(); + } + else if (!RHASH_EMPTY_P(hash)) { + st_foreach(RHASH_TBL(hash), set_zero, hash); + } + rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total)); + rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed)); + + for (i = 0; i <= T_MASK; i++) { + VALUE type; + switch (i) { +#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break; + COUNT_TYPE(T_NONE); + COUNT_TYPE(T_OBJECT); + COUNT_TYPE(T_CLASS); + COUNT_TYPE(T_MODULE); + COUNT_TYPE(T_FLOAT); + COUNT_TYPE(T_STRING); + COUNT_TYPE(T_REGEXP); + COUNT_TYPE(T_ARRAY); + COUNT_TYPE(T_HASH); + COUNT_TYPE(T_STRUCT); + COUNT_TYPE(T_BIGNUM); + COUNT_TYPE(T_FILE); + COUNT_TYPE(T_DATA); + COUNT_TYPE(T_MATCH); + COUNT_TYPE(T_COMPLEX); + COUNT_TYPE(T_RATIONAL); + COUNT_TYPE(T_NIL); + COUNT_TYPE(T_TRUE); + COUNT_TYPE(T_FALSE); + COUNT_TYPE(T_SYMBOL); + COUNT_TYPE(T_FIXNUM); + COUNT_TYPE(T_UNDEF); + COUNT_TYPE(T_NODE); + COUNT_TYPE(T_ICLASS); + COUNT_TYPE(T_ZOMBIE); +#undef COUNT_TYPE + default: type = INT2NUM(i); break; + } + if (counts[i]) + rb_hash_aset(hash, type, SIZET2NUM(counts[i])); + } + + return hash; +} + + + +/* + ------------------------ Garbage Collection ------------------------ +*/ + +/* Sweeping */ + +static VALUE +lazy_sweep_enable(void) +{ + rb_objspace_t *objspace = &rb_objspace; + + objspace->flags.dont_lazy_sweep = FALSE; + return Qnil; +} static void -gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) +gc_clear_slot_bits(struct heaps_slot *slot) { - register RVALUE *obj = RANY(ptr); + memset(GET_HEAP_BITMAP(slot->slot), 0, + HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); +} - goto marking; /* skip */ +static void +slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot) +{ + size_t free_num = 0, final_num = 0; + RVALUE *p, *pend; + RVALUE *final = deferred_final_list; + int deferred; + uintptr_t *bits; - again: - obj = RANY(ptr); - if (rb_special_const_p(ptr)) return; /* special const not marked */ - if (obj->as.basic.flags == 0) return; /* free cell */ - if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */ + p = sweep_slot->slot; pend = p + sweep_slot->limit; + bits = GET_HEAP_BITMAP(p); + while (p < pend) { + if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) { + if (p->as.basic.flags) { + if ((deferred = obj_free(objspace, (VALUE)p)) || + (FL_TEST(p, FL_FINALIZE))) { + if (!deferred) { + p->as.free.flags = T_ZOMBIE; + RDATA(p)->dfree = 0; + } + p->as.free.next = deferred_final_list; + deferred_final_list = p; + assert(BUILTIN_TYPE(p) == T_ZOMBIE); + final_num++; + } + else { + VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); + p->as.free.flags = 0; + p->as.free.next = sweep_slot->freelist; + sweep_slot->freelist = p; + free_num++; + } + } + else { + free_num++; + } + } + p++; + } + gc_clear_slot_bits(sweep_slot); + if 
(final_num + free_num == sweep_slot->limit && + objspace->heap.free_num > objspace->heap.do_heap_free) { + RVALUE *pp; - marking: - if (FL_TEST(obj, FL_EXIVAR)) { - rb_mark_generic_ivar(ptr); + for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) { + RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot; + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ + } + sweep_slot->limit = final_num; + unlink_heap_slot(objspace, sweep_slot); } + else { + if (free_num > 0) { + link_free_heap_slot(objspace, sweep_slot); + } + else { + sweep_slot->free_next = NULL; + } + objspace->heap.free_num += free_num; + } + objspace->heap.final_num += final_num; - switch (BUILTIN_TYPE(obj)) { - case T_NIL: - case T_FIXNUM: - rb_bug("rb_gc_mark() called for broken object"); - break; + if (deferred_final_list && !finalizing) { + rb_thread_t *th = GET_THREAD(); + if (th) { + RUBY_VM_SET_FINALIZER_INTERRUPT(th); + } + } +} - case T_NODE: - switch (nd_type(obj)) { - case NODE_IF: /* 1,2,3 */ - case NODE_FOR: - case NODE_ITER: - case NODE_WHEN: - case NODE_MASGN: - case NODE_RESCUE: - case NODE_RESBODY: - case NODE_CLASS: - case NODE_BLOCK_PASS: - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); - /* fall through */ - case NODE_BLOCK: /* 1,3 */ - case NODE_OPTBLOCK: - case NODE_ARRAY: - case NODE_DSTR: - case NODE_DXSTR: - case NODE_DREGX: - case NODE_DREGX_ONCE: - case NODE_ENSURE: - case NODE_CALL: - case NODE_DEFS: - case NODE_OP_ASGN1: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); - /* fall through */ - case NODE_SUPER: /* 3 */ - case NODE_FCALL: - case NODE_DEFN: - case NODE_ARGS_AUX: - ptr = (VALUE)obj->as.node.u3.node; - goto again; +static int +ready_to_gc(rb_objspace_t *objspace) +{ + if (dont_gc || during_gc) { + if (!has_free_object) { + if (!heaps_increment(objspace)) { + set_heaps_increment(objspace); + heaps_increment(objspace); + } + } + return FALSE; + } + return TRUE; +} - case NODE_WHILE: /* 1,2 */ - case NODE_UNTIL: - case NODE_AND: - case NODE_OR: - case NODE_CASE: - case NODE_SCLASS: - case NODE_DOT2: - case NODE_DOT3: - case NODE_FLIP2: - case NODE_FLIP3: - case NODE_MATCH2: - case NODE_MATCH3: - case NODE_OP_ASGN_OR: - case NODE_OP_ASGN_AND: - case NODE_MODULE: - case NODE_ALIAS: - case NODE_VALIAS: - case NODE_ARGSCAT: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); - /* fall through */ - case NODE_GASGN: /* 2 */ - case NODE_LASGN: - case NODE_DASGN: - case NODE_DASGN_CURR: - case NODE_IASGN: - case NODE_IASGN2: - case NODE_CVASGN: - case NODE_COLON3: - case NODE_OPT_N: - case NODE_EVSTR: - case NODE_UNDEF: - case NODE_POSTEXE: - ptr = (VALUE)obj->as.node.u2.node; - goto again; +static void +before_gc_sweep(rb_objspace_t *objspace) +{ + objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65); + objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2); + if (objspace->heap.free_min < initial_free_min) { + objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT; + objspace->heap.free_min = initial_free_min; + } + objspace->heap.sweep_slots = heaps; + objspace->heap.free_num = 0; + objspace->heap.free_slots = NULL; - case NODE_HASH: /* 1 */ - case NODE_LIT: - case NODE_STR: - case NODE_XSTR: - case NODE_DEFINED: - case NODE_MATCH: - case NODE_RETURN: - case NODE_BREAK: - case NODE_NEXT: - case NODE_YIELD: - case NODE_COLON2: - case NODE_SPLAT: - case NODE_TO_ARY: - ptr = (VALUE)obj->as.node.u1.node; - goto again; + /* sweep unlinked method entries */ + if (GET_VM()->unlinked_method_entry_list) { + 
rb_sweep_method_entry(GET_VM()); + } +} + +static void +after_gc_sweep(rb_objspace_t *objspace) +{ + size_t inc; + + gc_prof_set_malloc_info(objspace); + if (objspace->heap.free_num < objspace->heap.free_min) { + set_heaps_increment(objspace); + heaps_increment(objspace); + } + + inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0); + if (inc > malloc_limit) { + malloc_limit += (size_t)((inc - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT)); + if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit; + } + + free_unused_heaps(objspace); +} + +static int +lazy_sweep(rb_objspace_t *objspace) +{ + struct heaps_slot *next; + + heaps_increment(objspace); + while (objspace->heap.sweep_slots) { + next = objspace->heap.sweep_slots->next; + slot_sweep(objspace, objspace->heap.sweep_slots); + objspace->heap.sweep_slots = next; + if (has_free_object) { + during_gc = 0; + return TRUE; + } + } + return FALSE; +} + +static void +rest_sweep(rb_objspace_t *objspace) +{ + if (objspace->heap.sweep_slots) { + while (objspace->heap.sweep_slots) { + lazy_sweep(objspace); + } + after_gc_sweep(objspace); + } +} + +static void gc_marks(rb_objspace_t *objspace); + +static int +gc_lazy_sweep(rb_objspace_t *objspace) +{ + int res; + + if (objspace->flags.dont_lazy_sweep) + return garbage_collect(objspace); + + + if (!ready_to_gc(objspace)) return TRUE; + + during_gc++; + gc_prof_timer_start(objspace); + gc_prof_sweep_timer_start(objspace); + + if (objspace->heap.sweep_slots) { + res = lazy_sweep(objspace); + if (res) { + gc_prof_sweep_timer_stop(objspace); + gc_prof_set_malloc_info(objspace); + gc_prof_timer_stop(objspace, Qfalse); + return res; + } + after_gc_sweep(objspace); + } + else { + if (heaps_increment(objspace)) { + during_gc = 0; + return TRUE; + } + } - case NODE_SCOPE: /* 2,3 */ - case NODE_CDECL: - case NODE_OPT_ARG: - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); - ptr = (VALUE)obj->as.node.u2.node; - goto again; + gc_marks(objspace); - case NODE_ARGS: /* custom */ - { - struct rb_args_info *args = obj->as.node.u3.args; - if (args) { - if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init, lev); - if (args->post_init) gc_mark(objspace, (VALUE)args->post_init, lev); - if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args, lev); - if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args, lev); - if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg, lev); - } - } - ptr = (VALUE)obj->as.node.u2.node; - goto again; + before_gc_sweep(objspace); + if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) { + set_heaps_increment(objspace); + } - case NODE_ZARRAY: /* - */ - case NODE_ZSUPER: - case NODE_VCALL: - case NODE_GVAR: - case NODE_LVAR: - case NODE_DVAR: - case NODE_IVAR: - case NODE_CVAR: - case NODE_NTH_REF: - case NODE_BACK_REF: - case NODE_REDO: - case NODE_RETRY: - case NODE_SELF: - case NODE_NIL: - case NODE_TRUE: - case NODE_FALSE: - case NODE_ERRINFO: - case NODE_BLOCK_ARG: - break; - case NODE_ALLOCA: - mark_locations_array(objspace, - (VALUE*)obj->as.node.u1.value, - obj->as.node.u3.cnt); - ptr = (VALUE)obj->as.node.u2.node; - goto again; + gc_prof_sweep_timer_start(objspace); + if (!(res = lazy_sweep(objspace))) { + after_gc_sweep(objspace); + if (has_free_object) { + res = TRUE; + during_gc = 0; + } + } + gc_prof_sweep_timer_stop(objspace); - case NODE_CREF: - gc_mark(objspace, obj->as.node.nd_omod, lev); - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); - ptr = 
(VALUE)obj->as.node.u3.node; - goto again; + gc_prof_timer_stop(objspace, Qtrue); + return res; +} - default: /* unlisted NODE */ - if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); - } - if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); - } - if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); - } - } - return; /* no need to mark class. */ +static void +gc_sweep(rb_objspace_t *objspace) +{ + struct heaps_slot *next; + + before_gc_sweep(objspace); + + while (objspace->heap.sweep_slots) { + next = objspace->heap.sweep_slots->next; + slot_sweep(objspace, objspace->heap.sweep_slots); + objspace->heap.sweep_slots = next; } - gc_mark(objspace, obj->as.basic.klass, lev); - switch (BUILTIN_TYPE(obj)) { - case T_ICLASS: - case T_CLASS: - case T_MODULE: - mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev); - if (!RCLASS_EXT(obj)) break; - mark_tbl(objspace, RCLASS_IV_TBL(obj), lev); - mark_const_tbl(objspace, RCLASS_CONST_TBL(obj), lev); - ptr = RCLASS_SUPER(obj); - goto again; + after_gc_sweep(objspace); - case T_ARRAY: - if (FL_TEST(obj, ELTS_SHARED)) { - ptr = obj->as.array.as.heap.aux.shared; - goto again; - } - else { - long i, len = RARRAY_LEN(obj); - VALUE *ptr = RARRAY_PTR(obj); - for (i=0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); - } - } - break; + during_gc = 0; +} - case T_HASH: - mark_hash(objspace, obj->as.hash.ntbl, lev); - ptr = obj->as.hash.ifnone; - goto again; +/* Marking */ - case T_STRING: -#define STR_ASSOC FL_USER3 /* copied from string.c */ - if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) { - ptr = obj->as.string.as.heap.aux.shared; - goto again; - } - break; +#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p))) - case T_DATA: - if (RTYPEDDATA_P(obj)) { - RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark; - if (mark_func) (*mark_func)(DATA_PTR(obj)); - } - else { - if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); - } - break; - case T_OBJECT: - { - long i, len = ROBJECT_NUMIV(obj); - VALUE *ptr = ROBJECT_IVPTR(obj); - for (i = 0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); - } - } - break; +#ifdef __ia64 +#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp()) +#else +#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end) +#endif - case T_FILE: - if (obj->as.file.fptr) { - gc_mark(objspace, obj->as.file.fptr->pathv, lev); - gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->write_lock, lev); - } - break; +#define STACK_START (th->machine_stack_start) +#define STACK_END (th->machine_stack_end) +#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE)) - case T_REGEXP: - gc_mark(objspace, obj->as.regexp.src, lev); - break; +#if STACK_GROW_DIRECTION < 0 +# define STACK_LENGTH (size_t)(STACK_START - STACK_END) +#elif STACK_GROW_DIRECTION > 0 +# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1) +#else +# define STACK_LENGTH ((STACK_END < STACK_START) ? 
(size_t)(STACK_START - STACK_END) \ + : (size_t)(STACK_END - STACK_START + 1)) +#endif +#if !STACK_GROW_DIRECTION +int ruby_stack_grow_direction; +int +ruby_get_stack_grow_direction(volatile VALUE *addr) +{ + VALUE *end; + SET_MACHINE_STACK_END(&end); - case T_FLOAT: - case T_BIGNUM: - case T_ZOMBIE: - break; + if (end > addr) return ruby_stack_grow_direction = 1; + return ruby_stack_grow_direction = -1; +} +#endif - case T_MATCH: - gc_mark(objspace, obj->as.match.regexp, lev); - if (obj->as.match.str) { - ptr = obj->as.match.str; - goto again; - } - break; +#define GC_LEVEL_MAX 250 +#define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD) - case T_RATIONAL: - gc_mark(objspace, obj->as.rational.num, lev); - gc_mark(objspace, obj->as.rational.den, lev); - break; +size_t +ruby_stack_length(VALUE **p) +{ + rb_thread_t *th = GET_THREAD(); + SET_STACK_END; + if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END); + return STACK_LENGTH; +} - case T_COMPLEX: - gc_mark(objspace, obj->as.complex.real, lev); - gc_mark(objspace, obj->as.complex.imag, lev); - break; +static int +stack_check(int water_mark) +{ + int ret; + rb_thread_t *th = GET_THREAD(); + SET_STACK_END; + ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark; +#ifdef __ia64 + if (!ret) { + ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start > + th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark; + } +#endif + return ret; +} - case T_STRUCT: - { - long len = RSTRUCT_LEN(obj); - VALUE *ptr = RSTRUCT_PTR(obj); +#define STACKFRAME_FOR_CALL_CFUNC 512 + +int +ruby_stack_check(void) +{ +#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) + return 0; +#else + return stack_check(STACKFRAME_FOR_CALL_CFUNC); +#endif +} + +static void +init_mark_stack(rb_objspace_t *objspace) +{ + mark_stack_overflow = 0; + mark_stack_ptr = mark_stack; +} - while (len--) { - gc_mark(objspace, *ptr++, lev); - } +static void +mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n) +{ + VALUE v; + while (n--) { + v = *x; + VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)); + if (is_pointer_to_heap(objspace, (void *)v)) { + gc_mark(objspace, v, 0); } - break; - - default: - rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s", - BUILTIN_TYPE(obj), (void *)obj, - is_pointer_to_heap(objspace, obj) ? 
"corrupted object" : "non object"); + x++; } } -static int obj_free(rb_objspace_t *, VALUE); +static void +gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end) +{ + long n; -static inline struct heaps_slot * -add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p) + if (end <= start) return; + n = end - start; + mark_locations_array(objspace, start, n); +} + +void +rb_gc_mark_locations(VALUE *start, VALUE *end) { - struct heaps_slot *slot; + gc_mark_locations(&rb_objspace, start, end); +} - VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); - p->as.free.flags = 0; - slot = GET_HEAP_SLOT(p); - p->as.free.next = slot->freelist; - slot->freelist = p; +#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end)) - return slot; +struct mark_tbl_arg { + rb_objspace_t *objspace; + int lev; +}; + +static int +mark_entry(st_data_t key, st_data_t value, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, (VALUE)value, arg->lev); + return ST_CONTINUE; } static void -finalize_list(rb_objspace_t *objspace, RVALUE *p) +mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) { - while (p) { - RVALUE *tmp = p->as.free.next; - run_final(objspace, (VALUE)p); - if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */ - add_slot_local_freelist(objspace, p); - if (!is_lazy_sweeping(objspace)) { - gc_prof_dec_live_num(objspace); - } - } - else { - struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark; - slot->limit--; - } - p = tmp; - } + struct mark_tbl_arg arg; + if (!tbl || tbl->num_entries == 0) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_entry, (st_data_t)&arg); } -static void -unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot) +static int +mark_key(st_data_t key, st_data_t value, st_data_t data) { - if (slot->prev) - slot->prev->next = slot->next; - if (slot->next) - slot->next->prev = slot->prev; - if (heaps == slot) - heaps = slot->next; - if (objspace->heap.sweep_slots == slot) - objspace->heap.sweep_slots = slot->next; - slot->prev = NULL; - slot->next = NULL; + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, (VALUE)key, arg->lev); + return ST_CONTINUE; } static void -free_unused_heaps(rb_objspace_t *objspace) +mark_set(rb_objspace_t *objspace, st_table *tbl, int lev) { - size_t i, j; - RVALUE *last = 0; + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_key, (st_data_t)&arg); +} - for (i = j = 1; j < heaps_used; i++) { - if (objspace->heap.sorted[i].slot->limit == 0) { - struct heaps_slot* h = objspace->heap.sorted[i].slot; - ((struct heaps_free_bitmap *)(h->bits))->next = - objspace->heap.free_bitmap; - objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits; - if (!last) { - last = objspace->heap.sorted[i].slot->membase; - } - else { - aligned_free(objspace->heap.sorted[i].slot->membase); - } - free(objspace->heap.sorted[i].slot); - heaps_used--; - } - else { - if (i != j) { - objspace->heap.sorted[j] = objspace->heap.sorted[i]; - } - j++; - } - } - if (last) { - if (last < heaps_freed) { - aligned_free(heaps_freed); - heaps_freed = last; - } - else { - aligned_free(last); - } - } +void +rb_mark_set(st_table *tbl) +{ + mark_set(&rb_objspace, tbl, 0); +} + +static int +mark_keyvalue(st_data_t key, st_data_t value, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, (VALUE)key, arg->lev); + gc_mark(arg->objspace, (VALUE)value, 
arg->lev); + return ST_CONTINUE; } static void -gc_clear_slot_bits(struct heaps_slot *slot) +mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev) { - memset(GET_HEAP_BITMAP(slot->slot), 0, - HEAP_BITMAP_LIMIT * sizeof(uintptr_t)); + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_keyvalue, (st_data_t)&arg); +} + +void +rb_mark_hash(st_table *tbl) +{ + mark_hash(&rb_objspace, tbl, 0); } static void -slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot) +mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev) { - size_t free_num = 0, final_num = 0; - RVALUE *p, *pend; - RVALUE *final = deferred_final_list; - int deferred; - uintptr_t *bits; + const rb_method_definition_t *def = me->def; - p = sweep_slot->slot; pend = p + sweep_slot->limit; - bits = GET_HEAP_BITMAP(p); - while (p < pend) { - if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) { - if (p->as.basic.flags) { - if ((deferred = obj_free(objspace, (VALUE)p)) || - (FL_TEST(p, FL_FINALIZE))) { - if (!deferred) { - p->as.free.flags = T_ZOMBIE; - RDATA(p)->dfree = 0; - } - p->as.free.next = deferred_final_list; - deferred_final_list = p; - assert(BUILTIN_TYPE(p) == T_ZOMBIE); - final_num++; - } - else { - VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); - p->as.free.flags = 0; - p->as.free.next = sweep_slot->freelist; - sweep_slot->freelist = p; - free_num++; - } - } - else { - free_num++; - } - } - p++; + gc_mark(objspace, me->klass, lev); + if (!def) return; + switch (def->type) { + case VM_METHOD_TYPE_ISEQ: + gc_mark(objspace, def->body.iseq->self, lev); + break; + case VM_METHOD_TYPE_BMETHOD: + gc_mark(objspace, def->body.proc, lev); + break; + case VM_METHOD_TYPE_ATTRSET: + case VM_METHOD_TYPE_IVAR: + gc_mark(objspace, def->body.attr.location, lev); + break; + default: + break; /* ignore */ } - gc_clear_slot_bits(sweep_slot); - if (final_num + free_num == sweep_slot->limit && - objspace->heap.free_num > objspace->heap.do_heap_free) { - RVALUE *pp; +} - for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) { - RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot; - pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ - } - sweep_slot->limit = final_num; - unlink_heap_slot(objspace, sweep_slot); - } - else { - if (free_num > 0) { - link_free_heap_slot(objspace, sweep_slot); - } - else { - sweep_slot->free_next = NULL; - } - objspace->heap.free_num += free_num; - } - objspace->heap.final_num += final_num; +void +rb_mark_method_entry(const rb_method_entry_t *me) +{ + mark_method_entry(&rb_objspace, me, 0); +} - if (deferred_final_list && !finalizing) { - rb_thread_t *th = GET_THREAD(); - if (th) { - RUBY_VM_SET_FINALIZER_INTERRUPT(th); - } - } +static int +mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + mark_method_entry(arg->objspace, me, arg->lev); + return ST_CONTINUE; +} + +static void +mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +{ + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg); } static int -ready_to_gc(rb_objspace_t *objspace) +mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data) { - if (dont_gc || during_gc) { - if (!has_free_object) { - if (!heaps_increment(objspace)) { - set_heaps_increment(objspace); - heaps_increment(objspace); - } - } - return FALSE; - } - return TRUE; + 
struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, ce->value, arg->lev); + gc_mark(arg->objspace, ce->file, arg->lev); + return ST_CONTINUE; } static void -before_gc_sweep(rb_objspace_t *objspace) +mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) { - objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65); - objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2); - if (objspace->heap.free_min < initial_free_min) { - objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT; - objspace->heap.free_min = initial_free_min; - } - objspace->heap.sweep_slots = heaps; - objspace->heap.free_num = 0; - objspace->heap.free_slots = NULL; - - /* sweep unlinked method entries */ - if (GET_VM()->unlinked_method_entry_list) { - rb_sweep_method_entry(GET_VM()); - } + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg); } +#if STACK_GROW_DIRECTION < 0 +#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START) +#elif STACK_GROW_DIRECTION > 0 +#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix)) +#else +#define GET_STACK_BOUNDS(start, end, appendix) \ + ((STACK_END < STACK_START) ? \ + ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix))) +#endif + +#define numberof(array) (int)(sizeof(array) / sizeof((array)[0])) + static void -after_gc_sweep(rb_objspace_t *objspace) +mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th) { - size_t inc; + union { + rb_jmp_buf j; + VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)]; + } save_regs_gc_mark; + VALUE *stack_start, *stack_end; - gc_prof_set_malloc_info(objspace); - if (objspace->heap.free_num < objspace->heap.free_min) { - set_heaps_increment(objspace); - heaps_increment(objspace); - } + FLUSH_REGISTER_WINDOWS; + /* This assumes that all registers are saved into the jmp_buf (and stack) */ + rb_setjmp(save_regs_gc_mark.j); - inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0); - if (inc > malloc_limit) { - malloc_limit += (size_t)((inc - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT)); - if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit; - } + SET_STACK_END; + GET_STACK_BOUNDS(stack_start, stack_end, 1); - free_unused_heaps(objspace); + mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v)); + + rb_gc_mark_locations(stack_start, stack_end); +#ifdef __ia64 + rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end); +#endif +#if defined(__mc68000__) + mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2), + (STACK_START - STACK_END)); +#endif } -static int -lazy_sweep(rb_objspace_t *objspace) +void +rb_gc_mark_machine_stack(rb_thread_t *th) { - struct heaps_slot *next; + rb_objspace_t *objspace = &rb_objspace; + VALUE *stack_start, *stack_end; - heaps_increment(objspace); - while (objspace->heap.sweep_slots) { - next = objspace->heap.sweep_slots->next; - slot_sweep(objspace, objspace->heap.sweep_slots); - objspace->heap.sweep_slots = next; - if (has_free_object) { - during_gc = 0; - return TRUE; - } - } - return FALSE; + GET_STACK_BOUNDS(stack_start, stack_end, 0); + rb_gc_mark_locations(stack_start, stack_end); +#ifdef __ia64 + rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end); +#endif } -static void 
-rest_sweep(rb_objspace_t *objspace) +void +rb_mark_tbl(st_table *tbl) { - if (objspace->heap.sweep_slots) { - while (objspace->heap.sweep_slots) { - lazy_sweep(objspace); - } - after_gc_sweep(objspace); - } + mark_tbl(&rb_objspace, tbl, 0); } -static void gc_marks(rb_objspace_t *objspace); +void +rb_gc_mark_maybe(VALUE obj) +{ + if (is_pointer_to_heap(&rb_objspace, (void *)obj)) { + gc_mark(&rb_objspace, obj, 0); + } +} static int -gc_lazy_sweep(rb_objspace_t *objspace) +gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr) { - int res; - - if (objspace->flags.dont_lazy_sweep) - return garbage_collect(objspace); - - - if (!ready_to_gc(objspace)) return TRUE; - - during_gc++; - gc_prof_timer_start(objspace); - gc_prof_sweep_timer_start(objspace); - - if (objspace->heap.sweep_slots) { - res = lazy_sweep(objspace); - if (res) { - gc_prof_sweep_timer_stop(objspace); - gc_prof_set_malloc_info(objspace); - gc_prof_timer_stop(objspace, Qfalse); - return res; - } - after_gc_sweep(objspace); - } - else { - if (heaps_increment(objspace)) { - during_gc = 0; - return TRUE; - } - } + register uintptr_t *bits = GET_HEAP_BITMAP(ptr); + if (MARKED_IN_BITMAP(bits, ptr)) return 0; + MARK_IN_BITMAP(bits, ptr); + objspace->heap.live_num++; + return 1; +} - gc_marks(objspace); +static void +gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) +{ + register RVALUE *obj; - before_gc_sweep(objspace); - if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) { - set_heaps_increment(objspace); - } + obj = RANY(ptr); + if (rb_special_const_p(ptr)) return; /* special const not marked */ + if (obj->as.basic.flags == 0) return; /* free cell */ + if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */ - gc_prof_sweep_timer_start(objspace); - if (!(res = lazy_sweep(objspace))) { - after_gc_sweep(objspace); - if (has_free_object) { - res = TRUE; - during_gc = 0; - } + if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) { + if (!mark_stack_overflow) { + if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { + *mark_stack_ptr = ptr; + mark_stack_ptr++; + } + else { + mark_stack_overflow = 1; + } + } + return; } - gc_prof_sweep_timer_stop(objspace); + gc_mark_children(objspace, ptr, lev+1); +} - gc_prof_timer_stop(objspace, Qtrue); - return res; +void +rb_gc_mark(VALUE ptr) +{ + gc_mark(&rb_objspace, ptr, 0); } static void -gc_sweep(rb_objspace_t *objspace) +gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) { - struct heaps_slot *next; + register RVALUE *obj = RANY(ptr); - before_gc_sweep(objspace); + goto marking; /* skip */ - while (objspace->heap.sweep_slots) { - next = objspace->heap.sweep_slots->next; - slot_sweep(objspace, objspace->heap.sweep_slots); - objspace->heap.sweep_slots = next; + again: + obj = RANY(ptr); + if (rb_special_const_p(ptr)) return; /* special const not marked */ + if (obj->as.basic.flags == 0) return; /* free cell */ + if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */ + + marking: + if (FL_TEST(obj, FL_EXIVAR)) { + rb_mark_generic_ivar(ptr); } - after_gc_sweep(objspace); + switch (BUILTIN_TYPE(obj)) { + case T_NIL: + case T_FIXNUM: + rb_bug("rb_gc_mark() called for broken object"); + break; + + case T_NODE: + switch (nd_type(obj)) { + case NODE_IF: /* 1,2,3 */ + case NODE_FOR: + case NODE_ITER: + case NODE_WHEN: + case NODE_MASGN: + case NODE_RESCUE: + case NODE_RESBODY: + case NODE_CLASS: + case NODE_BLOCK_PASS: + gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + /* fall through */ + case NODE_BLOCK: /* 1,3 */ + 
case NODE_OPTBLOCK: + case NODE_ARRAY: + case NODE_DSTR: + case NODE_DXSTR: + case NODE_DREGX: + case NODE_DREGX_ONCE: + case NODE_ENSURE: + case NODE_CALL: + case NODE_DEFS: + case NODE_OP_ASGN1: + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + /* fall through */ + case NODE_SUPER: /* 3 */ + case NODE_FCALL: + case NODE_DEFN: + case NODE_ARGS_AUX: + ptr = (VALUE)obj->as.node.u3.node; + goto again; - during_gc = 0; -} + case NODE_WHILE: /* 1,2 */ + case NODE_UNTIL: + case NODE_AND: + case NODE_OR: + case NODE_CASE: + case NODE_SCLASS: + case NODE_DOT2: + case NODE_DOT3: + case NODE_FLIP2: + case NODE_FLIP3: + case NODE_MATCH2: + case NODE_MATCH3: + case NODE_OP_ASGN_OR: + case NODE_OP_ASGN_AND: + case NODE_MODULE: + case NODE_ALIAS: + case NODE_VALIAS: + case NODE_ARGSCAT: + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + /* fall through */ + case NODE_GASGN: /* 2 */ + case NODE_LASGN: + case NODE_DASGN: + case NODE_DASGN_CURR: + case NODE_IASGN: + case NODE_IASGN2: + case NODE_CVASGN: + case NODE_COLON3: + case NODE_OPT_N: + case NODE_EVSTR: + case NODE_UNDEF: + case NODE_POSTEXE: + ptr = (VALUE)obj->as.node.u2.node; + goto again; -void -rb_gc_force_recycle(VALUE p) -{ - rb_objspace_t *objspace = &rb_objspace; - struct heaps_slot *slot; + case NODE_HASH: /* 1 */ + case NODE_LIT: + case NODE_STR: + case NODE_XSTR: + case NODE_DEFINED: + case NODE_MATCH: + case NODE_RETURN: + case NODE_BREAK: + case NODE_NEXT: + case NODE_YIELD: + case NODE_COLON2: + case NODE_SPLAT: + case NODE_TO_ARY: + ptr = (VALUE)obj->as.node.u1.node; + goto again; - if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) { - add_slot_local_freelist(objspace, (RVALUE *)p); - } - else { - gc_prof_dec_live_num(objspace); - slot = add_slot_local_freelist(objspace, (RVALUE *)p); - if (slot->free_next == NULL) { - link_free_heap_slot(objspace, slot); - } - } -} + case NODE_SCOPE: /* 2,3 */ + case NODE_CDECL: + case NODE_OPT_ARG: + gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + ptr = (VALUE)obj->as.node.u2.node; + goto again; -static inline void -make_deferred(RVALUE *p) -{ - p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE; -} + case NODE_ARGS: /* custom */ + { + struct rb_args_info *args = obj->as.node.u3.args; + if (args) { + if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init, lev); + if (args->post_init) gc_mark(objspace, (VALUE)args->post_init, lev); + if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args, lev); + if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args, lev); + if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg, lev); + } + } + ptr = (VALUE)obj->as.node.u2.node; + goto again; -static inline void -make_io_deferred(RVALUE *p) -{ - rb_io_t *fptr = p->as.file.fptr; - make_deferred(p); - p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize; - p->as.data.data = fptr; -} + case NODE_ZARRAY: /* - */ + case NODE_ZSUPER: + case NODE_VCALL: + case NODE_GVAR: + case NODE_LVAR: + case NODE_DVAR: + case NODE_IVAR: + case NODE_CVAR: + case NODE_NTH_REF: + case NODE_BACK_REF: + case NODE_REDO: + case NODE_RETRY: + case NODE_SELF: + case NODE_NIL: + case NODE_TRUE: + case NODE_FALSE: + case NODE_ERRINFO: + case NODE_BLOCK_ARG: + break; + case NODE_ALLOCA: + mark_locations_array(objspace, + (VALUE*)obj->as.node.u1.value, + obj->as.node.u3.cnt); + ptr = (VALUE)obj->as.node.u2.node; + goto again; -static int -obj_free(rb_objspace_t *objspace, VALUE obj) -{ - switch (BUILTIN_TYPE(obj)) { - case T_NIL: - case T_FIXNUM: - case T_TRUE: - case T_FALSE: - 
rb_bug("obj_free() called for broken object"); - break; - } + case NODE_CREF: + gc_mark(objspace, obj->as.node.nd_omod, lev); + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + ptr = (VALUE)obj->as.node.u3.node; + goto again; - if (FL_TEST(obj, FL_EXIVAR)) { - rb_free_generic_ivar((VALUE)obj); - FL_UNSET(obj, FL_EXIVAR); + default: /* unlisted NODE */ + if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) { + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + } + if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) { + gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + } + if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) { + gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + } + } + return; /* no need to mark class. */ } + gc_mark(objspace, obj->as.basic.klass, lev); switch (BUILTIN_TYPE(obj)) { - case T_OBJECT: - if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) && - RANY(obj)->as.object.as.heap.ivptr) { - xfree(RANY(obj)->as.object.as.heap.ivptr); - } - break; - case T_MODULE: + case T_ICLASS: case T_CLASS: - rb_clear_cache_by_class((VALUE)obj); - if (RCLASS_M_TBL(obj)) { - rb_free_m_table(RCLASS_M_TBL(obj)); - } - if (RCLASS_IV_TBL(obj)) { - st_free_table(RCLASS_IV_TBL(obj)); - } - if (RCLASS_CONST_TBL(obj)) { - rb_free_const_table(RCLASS_CONST_TBL(obj)); - } - if (RCLASS_IV_INDEX_TBL(obj)) { - st_free_table(RCLASS_IV_INDEX_TBL(obj)); - } - xfree(RANY(obj)->as.klass.ptr); - break; - case T_STRING: - rb_str_free(obj); - break; + case T_MODULE: + mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev); + if (!RCLASS_EXT(obj)) break; + mark_tbl(objspace, RCLASS_IV_TBL(obj), lev); + mark_const_tbl(objspace, RCLASS_CONST_TBL(obj), lev); + ptr = RCLASS_SUPER(obj); + goto again; + case T_ARRAY: - rb_ary_free(obj); - break; - case T_HASH: - if (RANY(obj)->as.hash.ntbl) { - st_free_table(RANY(obj)->as.hash.ntbl); - } - break; - case T_REGEXP: - if (RANY(obj)->as.regexp.ptr) { - onig_free(RANY(obj)->as.regexp.ptr); + if (FL_TEST(obj, ELTS_SHARED)) { + ptr = obj->as.array.as.heap.aux.shared; + goto again; } - break; - case T_DATA: - if (DATA_PTR(obj)) { - if (RTYPEDDATA_P(obj)) { - RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree; - } - if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) { - xfree(DATA_PTR(obj)); - } - else if (RANY(obj)->as.data.dfree) { - make_deferred(RANY(obj)); - return 1; + else { + long i, len = RARRAY_LEN(obj); + VALUE *ptr = RARRAY_PTR(obj); + for (i=0; i < len; i++) { + gc_mark(objspace, *ptr++, lev); } } break; - case T_MATCH: - if (RANY(obj)->as.match.rmatch) { - struct rmatch *rm = RANY(obj)->as.match.rmatch; - onig_region_free(&rm->regs, 0); - if (rm->char_offset) - xfree(rm->char_offset); - xfree(rm); - } - break; - case T_FILE: - if (RANY(obj)->as.file.fptr) { - make_io_deferred(RANY(obj)); - return 1; + + case T_HASH: + mark_hash(objspace, obj->as.hash.ntbl, lev); + ptr = obj->as.hash.ifnone; + goto again; + + case T_STRING: +#define STR_ASSOC FL_USER3 /* copied from string.c */ + if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) { + ptr = obj->as.string.as.heap.aux.shared; + goto again; } break; - case T_RATIONAL: - case T_COMPLEX: + + case T_DATA: + if (RTYPEDDATA_P(obj)) { + RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark; + if (mark_func) (*mark_func)(DATA_PTR(obj)); + } + else { + if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); + } break; - case T_ICLASS: - /* iClass shares table with the module */ - xfree(RANY(obj)->as.klass.ptr); + + case T_OBJECT: + { + long i, len = 
ROBJECT_NUMIV(obj); + VALUE *ptr = ROBJECT_IVPTR(obj); + for (i = 0; i < len; i++) { + gc_mark(objspace, *ptr++, lev); + } + } break; + case T_FILE: + if (obj->as.file.fptr) { + gc_mark(objspace, obj->as.file.fptr->pathv, lev); + gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev); + gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev); + gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev); + gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev); + gc_mark(objspace, obj->as.file.fptr->write_lock, lev); + } + break; + + case T_REGEXP: + gc_mark(objspace, obj->as.regexp.src, lev); + break; + case T_FLOAT: + case T_BIGNUM: + case T_ZOMBIE: break; - case T_BIGNUM: - if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) { - xfree(RBIGNUM_DIGITS(obj)); + case T_MATCH: + gc_mark(objspace, obj->as.match.regexp, lev); + if (obj->as.match.str) { + ptr = obj->as.match.str; + goto again; } break; - case T_NODE: - switch (nd_type(obj)) { - case NODE_SCOPE: - if (RANY(obj)->as.node.u1.tbl) { - xfree(RANY(obj)->as.node.u1.tbl); - } - break; - case NODE_ARGS: - if (RANY(obj)->as.node.u3.args) { - xfree(RANY(obj)->as.node.u3.args); - } - break; - case NODE_ALLOCA: - xfree(RANY(obj)->as.node.u1.node); - break; - } - break; /* no need to free iv_tbl */ + + case T_RATIONAL: + gc_mark(objspace, obj->as.rational.num, lev); + gc_mark(objspace, obj->as.rational.den, lev); + break; + + case T_COMPLEX: + gc_mark(objspace, obj->as.complex.real, lev); + gc_mark(objspace, obj->as.complex.imag, lev); + break; case T_STRUCT: - if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 && - RANY(obj)->as.rstruct.as.heap.ptr) { - xfree(RANY(obj)->as.rstruct.as.heap.ptr); + { + long len = RSTRUCT_LEN(obj); + VALUE *ptr = RSTRUCT_PTR(obj); + + while (len--) { + gc_mark(objspace, *ptr++, lev); + } } break; default: - rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE, - BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags); + rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s", + BUILTIN_TYPE(obj), (void *)obj, + is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object"); } - - return 0; } -#define GC_NOTIFY 0 - -#if STACK_GROW_DIRECTION < 0 -#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START) -#elif STACK_GROW_DIRECTION > 0 -#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix)) -#else -#define GET_STACK_BOUNDS(start, end, appendix) \ - ((STACK_END < STACK_START) ? 
\ - ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix))) -#endif - -#define numberof(array) (int)(sizeof(array) / sizeof((array)[0])) - static void -mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th) +gc_mark_all(rb_objspace_t *objspace) { - union { - rb_jmp_buf j; - VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)]; - } save_regs_gc_mark; - VALUE *stack_start, *stack_end; + RVALUE *p, *pend; + size_t i; - FLUSH_REGISTER_WINDOWS; - /* This assumes that all registers are saved into the jmp_buf (and stack) */ - rb_setjmp(save_regs_gc_mark.j); + init_mark_stack(objspace); + for (i = 0; i < heaps_used; i++) { + p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; + while (p < pend) { + if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p) && + p->as.basic.flags) { + gc_mark_children(objspace, (VALUE)p, 0); + } + p++; + } + } +} - SET_STACK_END; - GET_STACK_BOUNDS(stack_start, stack_end, 1); +static void +gc_mark_rest(rb_objspace_t *objspace) +{ + VALUE tmp_arry[MARK_STACK_MAX]; + VALUE *p; - mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v)); + p = (mark_stack_ptr - mark_stack) + tmp_arry; + MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry); - rb_gc_mark_locations(stack_start, stack_end); -#ifdef __ia64 - rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end); -#endif -#if defined(__mc68000__) - mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2), - (STACK_START - STACK_END)); -#endif + init_mark_stack(objspace); + while (p != tmp_arry) { + p--; + gc_mark_children(objspace, *p, 0); + } } +#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack) + static void gc_marks(rb_objspace_t *objspace) { @@ -2565,6 +2793,70 @@ gc_marks(rb_objspace_t *objspace) gc_prof_mark_timer_stop(objspace); } +/* GC */ + +void +rb_gc_force_recycle(VALUE p) +{ + rb_objspace_t *objspace = &rb_objspace; + struct heaps_slot *slot; + + if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) { + add_slot_local_freelist(objspace, (RVALUE *)p); + } + else { + gc_prof_dec_live_num(objspace); + slot = add_slot_local_freelist(objspace, (RVALUE *)p); + if (slot->free_next == NULL) { + link_free_heap_slot(objspace, slot); + } + } +} + +void +rb_gc_register_mark_object(VALUE obj) +{ + VALUE ary = GET_THREAD()->vm->mark_object_ary; + rb_ary_push(ary, obj); +} + +void +rb_gc_register_address(VALUE *addr) +{ + rb_objspace_t *objspace = &rb_objspace; + struct gc_list *tmp; + + tmp = ALLOC(struct gc_list); + tmp->next = global_List; + tmp->varptr = addr; + global_List = tmp; +} + +void +rb_gc_unregister_address(VALUE *addr) +{ + rb_objspace_t *objspace = &rb_objspace; + struct gc_list *tmp = global_List; + + if (tmp->varptr == addr) { + global_List = tmp->next; + xfree(tmp); + return; + } + while (tmp->next) { + if (tmp->next->varptr == addr) { + struct gc_list *t = tmp->next; + + tmp->next = tmp->next->next; + xfree(t); + break; + } + tmp = tmp->next; + } +} + +#define GC_NOTIFY 0 + static int garbage_collect(rb_objspace_t *objspace) { @@ -2593,26 +2885,45 @@ garbage_collect(rb_objspace_t *objspace) return TRUE; } +static void * +gc_with_gvl(void *ptr) +{ + return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr); +} + +static int +garbage_collect_with_gvl(rb_objspace_t *objspace) +{ + if (dont_gc) return TRUE; + if (ruby_thread_has_gvl_p()) { + return garbage_collect(objspace); + } + else { + if (ruby_native_thread_p()) { + return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace); + 
} + else { + /* no ruby thread */ + fprintf(stderr, "[FATAL] failed to allocate memory\n"); + exit(EXIT_FAILURE); + } + } +} + int rb_garbage_collect(void) { return garbage_collect(&rb_objspace); } -void -rb_gc_mark_machine_stack(rb_thread_t *th) -{ - rb_objspace_t *objspace = &rb_objspace; - VALUE *stack_start, *stack_end; +#undef Init_stack - GET_STACK_BOUNDS(stack_start, stack_end, 0); - rb_gc_mark_locations(stack_start, stack_end); -#ifdef __ia64 - rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end); -#endif +void +Init_stack(volatile VALUE *addr) +{ + ruby_init_stack(addr); } - /* * call-seq: * GC.start -> nil @@ -2630,838 +2941,569 @@ rb_gc_start(void) return Qnil; } -#undef Init_stack - void -Init_stack(volatile VALUE *addr) +rb_gc(void) { - ruby_init_stack(addr); + rb_objspace_t *objspace = &rb_objspace; + garbage_collect(objspace); + if (!finalizing) finalize_deferred(objspace); + free_unused_heaps(objspace); +} + +int +rb_during_gc(void) +{ + rb_objspace_t *objspace = &rb_objspace; + return during_gc; } /* - * Document-class: ObjectSpace - * - * The ObjectSpace module contains a number of routines - * that interact with the garbage collection facility and allow you to - * traverse all living objects with an iterator. - * - * ObjectSpace also provides support for object - * finalizers, procs that will be called when a specific object is - * about to be destroyed by garbage collection. - * - * include ObjectSpace - * - * - * a = "A" - * b = "B" - * c = "C" - * - * - * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" }) - * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" }) - * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" }) + * call-seq: + * GC.count -> Integer * - * produces: + * The number of times GC occurred. * - * Finalizer three on 537763470 - * Finalizer one on 537763480 - * Finalizer two on 537763480 + * It returns the number of times GC occurred since the process started. * */ -void -Init_heap(void) -{ - init_heap(&rb_objspace); -} - -static VALUE -lazy_sweep_enable(void) -{ - rb_objspace_t *objspace = &rb_objspace; - - objspace->flags.dont_lazy_sweep = FALSE; - return Qnil; -} - -typedef int each_obj_callback(void *, void *, size_t, void *); - -struct each_obj_args { - each_obj_callback *callback; - void *data; -}; - static VALUE -objspace_each_objects(VALUE arg) +gc_count(VALUE self) { - size_t i; - RVALUE *membase = 0; - RVALUE *pstart, *pend; - rb_objspace_t *objspace = &rb_objspace; - struct each_obj_args *args = (struct each_obj_args *)arg; - volatile VALUE v; - - i = 0; - while (i < heaps_used) { - while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase) - i--; - while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase) - i++; - if (heaps_used <= i) - break; - membase = objspace->heap.sorted[i].slot->membase; - - pstart = objspace->heap.sorted[i].slot->slot; - pend = pstart + objspace->heap.sorted[i].slot->limit; - - for (; pstart != pend; pstart++) { - if (pstart->as.basic.flags) { - v = (VALUE)pstart; /* acquire to save this object */ - break; - } - } - if (pstart != pend) { - if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) { - break; - } - } - } - RB_GC_GUARD(v); - - return Qnil; + return UINT2NUM(rb_objspace.count); } /* - * rb_objspace_each_objects() is special C API to walk through - * Ruby object space. This C API is too difficult to use it. - * To be frank, you should not use it. 
Or you need to read the - * source code of this function and understand what this function does. + * call-seq: + * GC.stat -> Hash * - * 'callback' will be called several times (the number of heap slot, - * at current implementation) with: - * vstart: a pointer to the first living object of the heap_slot. - * vend: a pointer to next to the valid heap_slot area. - * stride: a distance to next VALUE. + * Returns a Hash containing information about the GC. * - * If callback() returns non-zero, the iteration will be stopped. + * The hash includes information about internal statistics about GC such as: * - * This is a sample callback code to iterate liveness objects: + * { + * :count => 18, + * :heap_used => 77, + * :heap_length => 77, + * :heap_increment => 0, + * :heap_live_num => 23287, + * :heap_free_num => 8115, + * :heap_final_num => 0, + * } * - * int - * sample_callback(void *vstart, void *vend, int stride, void *data) { - * VALUE v = (VALUE)vstart; - * for (; v != (VALUE)vend; v += stride) { - * if (RBASIC(v)->flags) { // liveness check - * // do something with live object 'v' - * } - * return 0; // continue to iteration - * } + * The contents of the hash are implementation defined and may be changed in + * the future. * - * Note: 'vstart' is not a top of heap_slot. This point the first - * living object to grasp at least one object to avoid GC issue. - * This means that you can not walk through all Ruby object slot - * including freed object slot. + * This method is only expected to work on C Ruby. * - * Note: On this implementation, 'stride' is same as sizeof(RVALUE). - * However, there are possibilities to pass variable values with - * 'stride' with some reasons. You must use stride instead of - * use some constant value in the iteration. */ -void -rb_objspace_each_objects(each_obj_callback *callback, void *data) + +static VALUE +gc_stat(int argc, VALUE *argv, VALUE self) { - struct each_obj_args args; rb_objspace_t *objspace = &rb_objspace; + VALUE hash; - rest_sweep(objspace); - objspace->flags.dont_lazy_sweep = TRUE; - - args.callback = callback; - args.data = data; - rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil); -} - -struct os_each_struct { - size_t num; - VALUE of; -}; - -static int -os_obj_of_i(void *vstart, void *vend, size_t stride, void *data) -{ - struct os_each_struct *oes = (struct os_each_struct *)data; - RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend; - volatile VALUE v; + if (rb_scan_args(argc, argv, "01", &hash) == 1) { + if (!RB_TYPE_P(hash, T_HASH)) + rb_raise(rb_eTypeError, "non-hash given"); + } - for (; p != pend; p++) { - if (p->as.basic.flags) { - switch (BUILTIN_TYPE(p)) { - case T_NONE: - case T_ICLASS: - case T_NODE: - case T_ZOMBIE: - continue; - case T_CLASS: - if (FL_TEST(p, FL_SINGLETON)) - continue; - default: - if (!p->as.basic.klass) continue; - v = (VALUE)p; - if (!oes->of || rb_obj_is_kind_of(v, oes->of)) { - rb_yield(v); - oes->num++; - } - } - } + if (hash == Qnil) { + hash = rb_hash_new(); } - return 0; -} + rest_sweep(objspace); -static VALUE -os_obj_of(VALUE of) -{ - struct os_each_struct oes; + rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count)); - oes.num = 0; - oes.of = of; - rb_objspace_each_objects(os_obj_of_i, &oes); - return SIZET2NUM(oes.num); + /* implementation dependent counters */ + rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used)); + rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length)); + rb_hash_aset(hash, 
ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment)); + rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num)); + rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num)); + rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num)); + return hash; } /* * call-seq: - * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum - * ObjectSpace.each_object([module]) -> an_enumerator - * - * Calls the block once for each living, nonimmediate object in this - * Ruby process. If module is specified, calls the block - * for only those classes or modules that match (or are a subclass of) - * module. Returns the number of objects found. Immediate - * objects (Fixnums, Symbols - * true, false, and nil) are - * never returned. In the example below, each_object - * returns both the numbers we defined and several constants defined in - * the Math module. - * - * If no block is given, an enumerator is returned instead. - * - * a = 102.7 - * b = 95 # Won't be returned - * c = 12345678987654321 - * count = ObjectSpace.each_object(Numeric) {|x| p x } - * puts "Total count: #{count}" - * - * produces: - * - * 12345678987654321 - * 102.7 - * 2.71828182845905 - * 3.14159265358979 - * 2.22044604925031e-16 - * 1.7976931348623157e+308 - * 2.2250738585072e-308 - * Total count: 7 + * GC.stress -> true or false * + * returns current status of GC stress mode. */ static VALUE -os_each_obj(int argc, VALUE *argv, VALUE os) +gc_stress_get(VALUE self) { - VALUE of; - - rb_secure(4); - if (argc == 0) { - of = 0; - } - else { - rb_scan_args(argc, argv, "01", &of); - } - RETURN_ENUMERATOR(os, 1, &of); - return os_obj_of(of); + rb_objspace_t *objspace = &rb_objspace; + return ruby_gc_stress ? Qtrue : Qfalse; } /* * call-seq: - * ObjectSpace.undefine_finalizer(obj) + * GC.stress = bool -> bool * - * Removes all finalizers for obj. + * Updates the GC stress mode. * + * When stress mode is enabled the GC is invoked at every GC opportunity: + * all memory and object allocations. + * + * Enabling stress mode makes Ruby very slow, it is only for debugging. */ static VALUE -undefine_final(VALUE os, VALUE obj) +gc_stress_set(VALUE self, VALUE flag) { - return rb_undefine_final(obj); + rb_objspace_t *objspace = &rb_objspace; + rb_secure(2); + ruby_gc_stress = RTEST(flag); + return flag; } +/* + * call-seq: + * GC.enable -> true or false + * + * Enables garbage collection, returning true if garbage + * collection was previously disabled. + * + * GC.disable #=> false + * GC.enable #=> true + * GC.enable #=> false + * + */ + VALUE -rb_undefine_final(VALUE obj) +rb_gc_enable(void) { rb_objspace_t *objspace = &rb_objspace; - st_data_t data = obj; - rb_check_frozen(obj); - st_delete(finalizer_table, &data, 0); - FL_UNSET(obj, FL_FINALIZE); - return obj; + int old = dont_gc; + + dont_gc = FALSE; + return old ? Qtrue : Qfalse; } /* * call-seq: - * ObjectSpace.define_finalizer(obj, aProc=proc()) + * GC.disable -> true or false * - * Adds aProc as a finalizer, to be called after obj - * was destroyed. + * Disables garbage collection, returning true if garbage + * collection was already disabled. 
+ * + * GC.disable #=> false + * GC.disable #=> true * */ -static VALUE -define_final(int argc, VALUE *argv, VALUE os) +VALUE +rb_gc_disable(void) { - VALUE obj, block; + rb_objspace_t *objspace = &rb_objspace; + int old = dont_gc; - rb_scan_args(argc, argv, "11", &obj, &block); - rb_check_frozen(obj); - if (argc == 1) { - block = rb_block_proc(); - } - else if (!rb_respond_to(block, rb_intern("call"))) { - rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", - rb_obj_classname(block)); - } - return define_final0(obj, block); + dont_gc = TRUE; + return old ? Qtrue : Qfalse; } -static VALUE -define_final0(VALUE obj, VALUE block) +void +rb_gc_set_params(void) { - rb_objspace_t *objspace = &rb_objspace; - VALUE table; - st_data_t data; - - if (!FL_ABLE(obj)) { - rb_raise(rb_eArgError, "cannot define finalizer for %s", - rb_obj_classname(obj)); - } - RBASIC(obj)->flags |= FL_FINALIZE; + char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr; - block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block); - OBJ_FREEZE(block); + if (rb_safe_level() > 0) return; - if (st_lookup(finalizer_table, obj, &data)) { - table = (VALUE)data; - rb_ary_push(table, block); + malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT"); + if (malloc_limit_ptr != NULL) { + int malloc_limit_i = atoi(malloc_limit_ptr); + if (RTEST(ruby_verbose)) + fprintf(stderr, "malloc_limit=%d (%d)\n", + malloc_limit_i, initial_malloc_limit); + if (malloc_limit_i > 0) { + initial_malloc_limit = malloc_limit_i; + } } - else { - table = rb_ary_new3(1, block); - RBASIC(table)->klass = 0; - st_add_direct(finalizer_table, obj, table); + + heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); + if (heap_min_slots_ptr != NULL) { + int heap_min_slots_i = atoi(heap_min_slots_ptr); + if (RTEST(ruby_verbose)) + fprintf(stderr, "heap_min_slots=%d (%d)\n", + heap_min_slots_i, initial_heap_min_slots); + if (heap_min_slots_i > 0) { + initial_heap_min_slots = heap_min_slots_i; + initial_expand_heap(&rb_objspace); + } } - return block; -} -VALUE -rb_define_final(VALUE obj, VALUE block) -{ - rb_check_frozen(obj); - if (!rb_respond_to(block, rb_intern("call"))) { - rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", - rb_obj_classname(block)); + free_min_ptr = getenv("RUBY_FREE_MIN"); + if (free_min_ptr != NULL) { + int free_min_i = atoi(free_min_ptr); + if (RTEST(ruby_verbose)) + fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min); + if (free_min_i > 0) { + initial_free_min = free_min_i; + } } - return define_final0(obj, block); } -void -rb_gc_copy_finalizer(VALUE dest, VALUE obj) -{ - rb_objspace_t *objspace = &rb_objspace; - VALUE table; - st_data_t data; +/* + ------------------------ Extended allocator ------------------------ +*/ - if (!FL_TEST(obj, FL_FINALIZE)) return; - if (st_lookup(finalizer_table, obj, &data)) { - table = (VALUE)data; - st_insert(finalizer_table, dest, table); - } - FL_SET(dest, FL_FINALIZE); -} +static void vm_xfree(rb_objspace_t *objspace, void *ptr); -static VALUE -run_single_final(VALUE arg) +static void * +negative_size_allocation_error_with_gvl(void *ptr) { - VALUE *args = (VALUE *)arg; - rb_eval_cmd(args[0], args[1], (int)args[2]); - return Qnil; + rb_raise(rb_eNoMemError, "%s", (const char *)ptr); + return 0; /* should not be reached */ } static void -run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table) +negative_size_allocation_error(const char *msg) { - long i; - int status; - VALUE args[3]; - VALUE objid = nonspecial_obj_id(obj); - - if (RARRAY_LEN(table) > 0) 
{ - args[1] = rb_obj_freeze(rb_ary_new3(1, objid)); + if (ruby_thread_has_gvl_p()) { + rb_raise(rb_eNoMemError, "%s", msg); } else { - args[1] = 0; - } - - args[2] = (VALUE)rb_safe_level(); - for (i=0; iheap.final_num--; - - RBASIC(obj)->klass = 0; - - if (RTYPEDDATA_P(obj)) { - free_func = RTYPEDDATA_TYPE(obj)->function.dfree; - } - else { - free_func = RDATA(obj)->dfree; - } - if (free_func) { - (*free_func)(DATA_PTR(obj)); - } - - key = (st_data_t)obj; - if (st_delete(finalizer_table, &key, &table)) { - run_finalizer(objspace, obj, (VALUE)table); - } + rb_memerror(); + return 0; } static void -finalize_deferred(rb_objspace_t *objspace) -{ - RVALUE *p = deferred_final_list; - deferred_final_list = 0; - - if (p) { - finalize_list(objspace, p); - } -} - -void -rb_gc_finalize_deferred(void) -{ - rb_objspace_t *objspace = &rb_objspace; - if (ATOMIC_EXCHANGE(finalizing, 1)) return; - finalize_deferred(objspace); - ATOMIC_SET(finalizing, 0); -} - -static int -chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg) +ruby_memerror(void) { - RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg; - if ((p->as.basic.flags & FL_FINALIZE) == FL_FINALIZE && - !MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) { - if (BUILTIN_TYPE(p) != T_ZOMBIE) { - p->as.free.flags = T_ZOMBIE; - RDATA(p)->dfree = 0; + if (ruby_thread_has_gvl_p()) { + rb_memerror(); + } + else { + if (ruby_native_thread_p()) { + rb_thread_call_with_gvl(ruby_memerror_body, 0); + } + else { + /* no ruby thread */ + fprintf(stderr, "[FATAL] failed to allocate memory\n"); + exit(EXIT_FAILURE); } - p->as.free.next = *final_list; - *final_list = p; } - return ST_CONTINUE; } -struct force_finalize_list { - VALUE obj; - VALUE table; - struct force_finalize_list *next; -}; - -static int -force_chain_object(st_data_t key, st_data_t val, st_data_t arg) +void +rb_memerror(void) { - struct force_finalize_list **prev = (struct force_finalize_list **)arg; - struct force_finalize_list *curr = ALLOC(struct force_finalize_list); - curr->obj = key; - curr->table = val; - curr->next = *prev; - *prev = curr; - return ST_CONTINUE; + rb_thread_t *th = GET_THREAD(); + if (!nomem_error || + (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { + fprintf(stderr, "[FATAL] failed to allocate memory\n"); + exit(EXIT_FAILURE); + } + if (rb_thread_raised_p(th, RAISED_NOMEMORY)) { + rb_thread_raised_clear(th); + GET_THREAD()->errinfo = nomem_error; + JUMP_TAG(TAG_RAISE); + } + rb_thread_raised_set(th, RAISED_NOMEMORY); + rb_exc_raise(nomem_error); } -void -rb_gc_call_finalizer_at_exit(void) +static void * +aligned_malloc(size_t alignment, size_t size) { - rb_objspace_call_finalizer(&rb_objspace); + void *res; + +#if defined __MINGW32__ + res = __mingw_aligned_malloc(size, alignment); +#elif defined _WIN32 && !defined __CYGWIN__ + res = _aligned_malloc(size, alignment); +#elif defined(HAVE_POSIX_MEMALIGN) + if (posix_memalign(&res, alignment, size) == 0) { + return res; + } + else { + return NULL; + } +#elif defined(HAVE_MEMALIGN) + res = memalign(alignment, size); +#else + char* aligned; + res = malloc(alignment + size + sizeof(void*)); + aligned = (char*)res + alignment + sizeof(void*); + aligned -= ((VALUE)aligned & (alignment - 1)); + ((void**)aligned)[-1] = res; + res = (void*)aligned; +#endif + +#if defined(_DEBUG) || defined(GC_DEBUG) + /* alignment must be a power of 2 */ + assert((alignment - 1) & alignment == 0); + assert(alignment % sizeof(void*) == 0); +#endif + return res; } static void -rb_objspace_call_finalizer(rb_objspace_t 
*objspace) +aligned_free(void *ptr) { - RVALUE *p, *pend; - RVALUE *final_list = 0; - size_t i; - - /* run finalizers */ - rest_sweep(objspace); - - if (ATOMIC_EXCHANGE(finalizing, 1)) return; +#if defined __MINGW32__ + __mingw_aligned_free(ptr); +#elif defined _WIN32 && !defined __CYGWIN__ + _aligned_free(ptr); +#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN) + free(ptr); +#else + free(((void**)ptr)[-1]); +#endif +} - do { - /* XXX: this loop will make no sense */ - /* because mark will not be removed */ - finalize_deferred(objspace); - mark_tbl(objspace, finalizer_table, 0); - st_foreach(finalizer_table, chain_finalized_object, - (st_data_t)&deferred_final_list); - } while (deferred_final_list); - /* force to run finalizer */ - while (finalizer_table->num_entries) { - struct force_finalize_list *list = 0; - st_foreach(finalizer_table, force_chain_object, (st_data_t)&list); - while (list) { - struct force_finalize_list *curr = list; - st_data_t obj = (st_data_t)curr->obj; - run_finalizer(objspace, curr->obj, curr->table); - st_delete(finalizer_table, &obj, 0); - list = curr->next; - xfree(curr); - } +static inline size_t +vm_malloc_prepare(rb_objspace_t *objspace, size_t size) +{ + if ((ssize_t)size < 0) { + negative_size_allocation_error("negative allocation size (or too big)"); } + if (size == 0) size = 1; - /* finalizers are part of garbage collection */ - during_gc++; +#if CALC_EXACT_MALLOC_SIZE + size += sizeof(size_t); +#endif - /* run data object's finalizers */ - for (i = 0; i < heaps_used; i++) { - p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; - while (p < pend) { - if (BUILTIN_TYPE(p) == T_DATA && - DATA_PTR(p) && RANY(p)->as.data.dfree && - !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) && - !rb_obj_is_fiber((VALUE)p)) { - p->as.free.flags = 0; - if (RTYPEDDATA_P(p)) { - RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree; - } - if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) { - xfree(DATA_PTR(p)); - } - else if (RANY(p)->as.data.dfree) { - make_deferred(RANY(p)); - RANY(p)->as.free.next = final_list; - final_list = p; - } - } - else if (BUILTIN_TYPE(p) == T_FILE) { - if (RANY(p)->as.file.fptr) { - make_io_deferred(RANY(p)); - RANY(p)->as.free.next = final_list; - final_list = p; - } - } - p++; - } - } - during_gc = 0; - if (final_list) { - finalize_list(objspace, final_list); + if ((ruby_gc_stress && !ruby_disable_gc_stress) || + (malloc_increase+size) > malloc_limit) { + garbage_collect_with_gvl(objspace); } - st_free_table(finalizer_table); - finalizer_table = 0; - ATOMIC_SET(finalizing, 0); + return size; } -void -rb_gc(void) +static inline void * +vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size) { - rb_objspace_t *objspace = &rb_objspace; - garbage_collect(objspace); - if (!finalizing) finalize_deferred(objspace); - free_unused_heaps(objspace); -} + ATOMIC_SIZE_ADD(malloc_increase, size); -static inline int -is_id_value(rb_objspace_t *objspace, VALUE ptr) -{ - if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE; - if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE; - if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE; - return TRUE; +#if CALC_EXACT_MALLOC_SIZE + ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size); + ATOMIC_SIZE_INC(objspace->malloc_params.allocations); + ((size_t *)mem)[0] = size; + mem = (size_t *)mem + 1; +#endif + + return mem; } -static inline int -is_dead_object(rb_objspace_t *objspace, VALUE ptr) +#define TRY_WITH_GC(alloc) do { \ + if (!(alloc) && \ + 
(!garbage_collect_with_gvl(objspace) || \ + !(alloc))) { \ + ruby_memerror(); \ + } \ + } while (0) + +static void * +vm_xmalloc(rb_objspace_t *objspace, size_t size) { - struct heaps_slot *slot = objspace->heap.sweep_slots; - if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr)) - return FALSE; - while (slot) { - if ((VALUE)slot->slot <= ptr && ptr < (VALUE)(slot->slot + slot->limit)) - return TRUE; - slot = slot->next; - } - return FALSE; + void *mem; + + size = vm_malloc_prepare(objspace, size); + TRY_WITH_GC(mem = malloc(size)); + return vm_malloc_fixup(objspace, mem, size); } -static inline int -is_live_object(rb_objspace_t *objspace, VALUE ptr) +static void * +vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size) { - if (BUILTIN_TYPE(ptr) == 0) return FALSE; - if (RBASIC(ptr)->klass == 0) return FALSE; - if (is_dead_object(objspace, ptr)) return FALSE; - return TRUE; -} + void *mem; +#if CALC_EXACT_MALLOC_SIZE + size_t oldsize; +#endif -/* - * call-seq: - * ObjectSpace._id2ref(object_id) -> an_object - * - * Converts an object id to a reference to the object. May not be - * called on an object id passed as a parameter to a finalizer. - * - * s = "I am a string" #=> "I am a string" - * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string" - * r == s #=> true - * - */ + if ((ssize_t)size < 0) { + negative_size_allocation_error("negative re-allocation size"); + } + if (!ptr) return vm_xmalloc(objspace, size); + if (size == 0) { + vm_xfree(objspace, ptr); + return 0; + } + if (ruby_gc_stress && !ruby_disable_gc_stress) + garbage_collect_with_gvl(objspace); -static VALUE -id2ref(VALUE obj, VALUE objid) -{ -#if SIZEOF_LONG == SIZEOF_VOIDP -#define NUM2PTR(x) NUM2ULONG(x) -#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP -#define NUM2PTR(x) NUM2ULL(x) +#if CALC_EXACT_MALLOC_SIZE + size += sizeof(size_t); + ptr = (size_t *)ptr - 1; + oldsize = ((size_t *)ptr)[0]; #endif - rb_objspace_t *objspace = &rb_objspace; - VALUE ptr; - void *p0; - rb_secure(4); - ptr = NUM2PTR(objid); - p0 = (void *)ptr; + mem = realloc(ptr, size); + if (!mem) { + if (garbage_collect_with_gvl(objspace)) { + mem = realloc(ptr, size); + } + if (!mem) { + ruby_memerror(); + } + } + ATOMIC_SIZE_ADD(malloc_increase, size); - if (ptr == Qtrue) return Qtrue; - if (ptr == Qfalse) return Qfalse; - if (ptr == Qnil) return Qnil; - if (FIXNUM_P(ptr)) return (VALUE)ptr; - ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ +#if CALC_EXACT_MALLOC_SIZE + ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize); + ((size_t *)mem)[0] = size; + mem = (size_t *)mem + 1; +#endif - if ((ptr % sizeof(RVALUE)) == (4 << 2)) { - ID symid = ptr / sizeof(RVALUE); - if (rb_id2name(symid) == 0) - rb_raise(rb_eRangeError, "%p is not symbol id value", p0); - return ID2SYM(symid); - } + return mem; +} - if (!is_id_value(objspace, ptr)) { - rb_raise(rb_eRangeError, "%p is not id value", p0); - } - if (!is_live_object(objspace, ptr)) { - rb_raise(rb_eRangeError, "%p is recycled object", p0); +static void +vm_xfree(rb_objspace_t *objspace, void *ptr) +{ +#if CALC_EXACT_MALLOC_SIZE + size_t size; + ptr = ((size_t *)ptr) - 1; + size = ((size_t*)ptr)[0]; + if (size) { + ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size); + ATOMIC_SIZE_DEC(objspace->malloc_params.allocations); } - return (VALUE)ptr; -} +#endif -/* - * Document-method: __id__ - * Document-method: object_id - * - * call-seq: - * obj.__id__ -> fixnum - * obj.object_id -> fixnum - * - * Returns an integer identifier for obj. 
The same number will - * be returned on all calls to id for a given object, and - * no two active objects will share an id. - * Object#object_id is a different concept from the - * :name notation, which returns the symbol id of - * name. Replaces the deprecated Object#id. - */ + free(ptr); +} -/* - * call-seq: - * obj.hash -> fixnum - * - * Generates a Fixnum hash value for this object. This - * function must have the property that a.eql?(b) implies - * a.hash == b.hash. The hash value is used by class - * Hash. Any hash value that exceeds the capacity of a - * Fixnum will be truncated before being used. - */ +void * +ruby_xmalloc(size_t size) +{ + return vm_xmalloc(&rb_objspace, size); +} -VALUE -rb_obj_id(VALUE obj) +static inline size_t +xmalloc2_size(size_t n, size_t size) { - /* - * 32-bit VALUE space - * MSB ------------------------ LSB - * false 00000000000000000000000000000000 - * true 00000000000000000000000000000010 - * nil 00000000000000000000000000000100 - * undef 00000000000000000000000000000110 - * symbol ssssssssssssssssssssssss00001110 - * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE)) - * fixnum fffffffffffffffffffffffffffffff1 - * - * object_id space - * LSB - * false 00000000000000000000000000000000 - * true 00000000000000000000000000000010 - * nil 00000000000000000000000000000100 - * undef 00000000000000000000000000000110 - * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4) - * object oooooooooooooooooooooooooooooo0 o...o % A = 0 - * fixnum fffffffffffffffffffffffffffffff1 bignum if required - * - * where A = sizeof(RVALUE)/4 - * - * sizeof(RVALUE) is - * 20 if 32-bit, double is 4-byte aligned - * 24 if 32-bit, double is 8-byte aligned - * 40 if 64-bit - */ - if (SYMBOL_P(obj)) { - return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG; - } - if (SPECIAL_CONST_P(obj)) { - return LONG2NUM((SIGNED_VALUE)obj); + size_t len = size * n; + if (n != 0 && size != len / n) { + rb_raise(rb_eArgError, "malloc: possible integer overflow"); } - return nonspecial_obj_id(obj); + return len; } -static int -set_zero(st_data_t key, st_data_t val, st_data_t arg) +void * +ruby_xmalloc2(size_t n, size_t size) { - VALUE k = (VALUE)key; - VALUE hash = (VALUE)arg; - rb_hash_aset(hash, k, INT2FIX(0)); - return ST_CONTINUE; + return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size)); } -/* - * call-seq: - * ObjectSpace.count_objects([result_hash]) -> hash - * - * Counts objects for each type. - * - * It returns a hash as: - * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...} - * - * If the optional argument, result_hash, is given, - * it is overwritten and returned. - * This is intended to avoid probe effect. - * - * The contents of the returned hash is implementation defined. - * It may be changed in future. - * - * This method is not expected to work except C Ruby. 
- * - */ - -static VALUE -count_objects(int argc, VALUE *argv, VALUE os) +static void * +vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize) { - rb_objspace_t *objspace = &rb_objspace; - size_t counts[T_MASK+1]; - size_t freed = 0; - size_t total = 0; - size_t i; - VALUE hash; - - if (rb_scan_args(argc, argv, "01", &hash) == 1) { - if (!RB_TYPE_P(hash, T_HASH)) - rb_raise(rb_eTypeError, "non-hash given"); - } + void *mem; + size_t size; - for (i = 0; i <= T_MASK; i++) { - counts[i] = 0; - } + size = xmalloc2_size(count, elsize); + size = vm_malloc_prepare(objspace, size); - for (i = 0; i < heaps_used; i++) { - RVALUE *p, *pend; + TRY_WITH_GC(mem = calloc(1, size)); + return vm_malloc_fixup(objspace, mem, size); +} - p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; - for (;p < pend; p++) { - if (p->as.basic.flags) { - counts[BUILTIN_TYPE(p)]++; - } - else { - freed++; - } - } - total += objspace->heap.sorted[i].slot->limit; - } +void * +ruby_xcalloc(size_t n, size_t size) +{ + return vm_xcalloc(&rb_objspace, n, size); +} - if (hash == Qnil) { - hash = rb_hash_new(); - } - else if (!RHASH_EMPTY_P(hash)) { - st_foreach(RHASH_TBL(hash), set_zero, hash); - } - rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total)); - rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed)); +void * +ruby_xrealloc(void *ptr, size_t size) +{ + return vm_xrealloc(&rb_objspace, ptr, size); +} - for (i = 0; i <= T_MASK; i++) { - VALUE type; - switch (i) { -#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break; - COUNT_TYPE(T_NONE); - COUNT_TYPE(T_OBJECT); - COUNT_TYPE(T_CLASS); - COUNT_TYPE(T_MODULE); - COUNT_TYPE(T_FLOAT); - COUNT_TYPE(T_STRING); - COUNT_TYPE(T_REGEXP); - COUNT_TYPE(T_ARRAY); - COUNT_TYPE(T_HASH); - COUNT_TYPE(T_STRUCT); - COUNT_TYPE(T_BIGNUM); - COUNT_TYPE(T_FILE); - COUNT_TYPE(T_DATA); - COUNT_TYPE(T_MATCH); - COUNT_TYPE(T_COMPLEX); - COUNT_TYPE(T_RATIONAL); - COUNT_TYPE(T_NIL); - COUNT_TYPE(T_TRUE); - COUNT_TYPE(T_FALSE); - COUNT_TYPE(T_SYMBOL); - COUNT_TYPE(T_FIXNUM); - COUNT_TYPE(T_UNDEF); - COUNT_TYPE(T_NODE); - COUNT_TYPE(T_ICLASS); - COUNT_TYPE(T_ZOMBIE); -#undef COUNT_TYPE - default: type = INT2NUM(i); break; - } - if (counts[i]) - rb_hash_aset(hash, type, SIZET2NUM(counts[i])); +void * +ruby_xrealloc2(void *ptr, size_t n, size_t size) +{ + size_t len = size * n; + if (n != 0 && size != len / n) { + rb_raise(rb_eArgError, "realloc: possible integer overflow"); } + return ruby_xrealloc(ptr, len); +} - return hash; +void +ruby_xfree(void *x) +{ + if (x) + vm_xfree(&rb_objspace, x); +} + + +/* Mimic ruby_xmalloc, but need not rb_objspace. + * should return pointer suitable for ruby_xfree + */ +void * +ruby_mimmalloc(size_t size) +{ + void *mem; +#if CALC_EXACT_MALLOC_SIZE + size += sizeof(size_t); +#endif + mem = malloc(size); +#if CALC_EXACT_MALLOC_SIZE + /* set 0 for consistency of allocated_size/allocations */ + ((size_t *)mem)[0] = 0; + mem = (size_t *)mem + 1; +#endif + return mem; } +#if CALC_EXACT_MALLOC_SIZE /* - * Document-class: ObjectSpace::WeakMap + * call-seq: + * GC.malloc_allocated_size -> Integer * - * An ObjectSpace::WeakMap object holds references to - * any objects, but those objects can get disposed by GC. + * The allocated size by malloc(). + * + * It returns the allocated size by malloc(). 
+ */ + +static VALUE +gc_malloc_allocated_size(VALUE self) +{ + return UINT2NUM(rb_objspace.malloc_params.allocated_size); +} + +/* + * call-seq: + * GC.malloc_allocations -> Integer + * + * The number of allocated memory object by malloc(). + * + * It returns the number of allocated memory object by malloc(). */ +static VALUE +gc_malloc_allocations(VALUE self) +{ + return UINT2NUM(rb_objspace.malloc_params.allocations); +} +#endif + +/* + ------------------------------ WeakMap ------------------------------ +*/ + struct weakmap { st_table *obj2wmap; /* obj -> [ref,...] */ st_table *wmap2obj; /* ref -> obj */ @@ -3616,108 +3658,6 @@ wmap_aref(VALUE self, VALUE wmap) return obj; } -/* - * call-seq: - * GC.count -> Integer - * - * The number of times GC occurred. - * - * It returns the number of times GC occurred since the process started. - * - */ - -static VALUE -gc_count(VALUE self) -{ - return UINT2NUM(rb_objspace.count); -} - -/* - * call-seq: - * GC.stat -> Hash - * - * Returns a Hash containing information about the GC. - * - * The hash includes information about internal statistics about GC such as: - * - * { - * :count => 18, - * :heap_used => 77, - * :heap_length => 77, - * :heap_increment => 0, - * :heap_live_num => 23287, - * :heap_free_num => 8115, - * :heap_final_num => 0, - * } - * - * The contents of the hash are implementation defined and may be changed in - * the future. - * - * This method is only expected to work on C Ruby. - * - */ - -static VALUE -gc_stat(int argc, VALUE *argv, VALUE self) -{ - rb_objspace_t *objspace = &rb_objspace; - VALUE hash; - - if (rb_scan_args(argc, argv, "01", &hash) == 1) { - if (!RB_TYPE_P(hash, T_HASH)) - rb_raise(rb_eTypeError, "non-hash given"); - } - - if (hash == Qnil) { - hash = rb_hash_new(); - } - - rest_sweep(objspace); - - rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count)); - - /* implementation dependent counters */ - rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used)); - rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length)); - rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment)); - rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num)); - rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num)); - rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num)); - return hash; -} - - -#if CALC_EXACT_MALLOC_SIZE -/* - * call-seq: - * GC.malloc_allocated_size -> Integer - * - * The allocated size by malloc(). - * - * It returns the allocated size by malloc(). - */ - -static VALUE -gc_malloc_allocated_size(VALUE self) -{ - return UINT2NUM(rb_objspace.malloc_params.allocated_size); -} - -/* - * call-seq: - * GC.malloc_allocations -> Integer - * - * The number of allocated memory object by malloc(). - * - * It returns the number of allocated memory object by malloc(). - */ - -static VALUE -gc_malloc_allocations(VALUE self) -{ - return UINT2NUM(rb_objspace.malloc_params.allocations); -} -#endif /* ------------------------------ GC profiler ------------------------------ @@ -4132,6 +4072,92 @@ gc_profile_total_time(VALUE self) return DBL2NUM(time); } +/* + * call-seq: + * GC::Profiler.enable? -> true or false + * + * The current status of GC profile mode. + */ + +static VALUE +gc_profile_enable_get(VALUE self) +{ + rb_objspace_t *objspace = &rb_objspace; + return objspace->profile.run ? 
Qtrue : Qfalse; +} + +/* + * call-seq: + * GC::Profiler.enable -> nil + * + * Starts the GC profiler. + * + */ + +static VALUE +gc_profile_enable(void) +{ + rb_objspace_t *objspace = &rb_objspace; + + objspace->profile.run = TRUE; + return Qnil; +} + +/* + * call-seq: + * GC::Profiler.disable -> nil + * + * Stops the GC profiler. + * + */ + +static VALUE +gc_profile_disable(void) +{ + rb_objspace_t *objspace = &rb_objspace; + + objspace->profile.run = FALSE; + return Qnil; +} + +/* + * Document-class: ObjectSpace + * + * The ObjectSpace module contains a number of routines + * that interact with the garbage collection facility and allow you to + * traverse all living objects with an iterator. + * + * ObjectSpace also provides support for object + * finalizers, procs that will be called when a specific object is + * about to be destroyed by garbage collection. + * + * include ObjectSpace + * + * + * a = "A" + * b = "B" + * c = "C" + * + * + * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" }) + * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" }) + * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" }) + * + * produces: + * + * Finalizer three on 537763470 + * Finalizer one on 537763480 + * Finalizer two on 537763480 + * + */ + +/* + * Document-class: ObjectSpace::WeakMap + * + * An ObjectSpace::WeakMap object holds references to + * any objects, but those objects can get disposed by GC. + */ + /* Document-class: GC::Profiler * * The GC profiler provides access to information on GC runs including time, -- cgit v1.2.3
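
A minimal standalone sketch of the over-allocate-and-stash technique used by the portable fallback branch of aligned_malloc()/aligned_free() in the hunk above (the branch taken when neither posix_memalign(), memalign(), nor the Windows aligned allocators are available). The sketch rounds the pointer up where gc.c rounds down from the end of the padding; the effect is the same. The demo_* names are hypothetical and not part of gc.c:

    /* sketch only; demo_* names are hypothetical, not gc.c API */
    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    static void *
    demo_aligned_malloc(size_t alignment, size_t size)
    {
        /* assumes alignment is a power of two no smaller than sizeof(void *) */
        assert(alignment >= sizeof(void *) && (alignment & (alignment - 1)) == 0);

        /* worst case we shift by (alignment - 1) and need one stashed pointer */
        char *raw = malloc(size + alignment + sizeof(void *));
        if (raw == NULL) return NULL;

        /* skip past the stash slot, then round up to the next aligned address */
        uintptr_t addr = (uintptr_t)(raw + sizeof(void *));
        addr = (addr + alignment - 1) & ~(uintptr_t)(alignment - 1);

        /* remember where the raw block began, right before the aligned area */
        ((void **)addr)[-1] = raw;
        return (void *)addr;
    }

    static void
    demo_aligned_free(void *ptr)
    {
        /* recover and release the raw pointer stashed by demo_aligned_malloc() */
        if (ptr) free(((void **)ptr)[-1]);
    }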
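
A minimal sketch of the allocation policy that the TRY_WITH_GC() macro and vm_xmalloc() implement in the hunk above: try malloc(), on failure run the collector and retry exactly once, and only then give up and report out-of-memory. The demo_* functions are hypothetical stand-ins for garbage_collect_with_gvl() and ruby_memerror(), not gc.c API:

    /* sketch only; demo_* names are hypothetical placeholders */
    #include <stdio.h>
    #include <stdlib.h>

    /* placeholder for garbage_collect_with_gvl(); returns nonzero if GC ran */
    static int
    demo_collect_garbage(void)
    {
        return 0;
    }

    /* placeholder for ruby_memerror(); never returns */
    static void
    demo_out_of_memory(void)
    {
        fputs("[FATAL] failed to allocate memory\n", stderr);
        exit(EXIT_FAILURE);
    }

    static void *
    demo_xmalloc(size_t size)
    {
        void *mem;

        if (size == 0) size = 1;   /* malloc(0) may legitimately return NULL */

        mem = malloc(size);
        if (mem == NULL) {
            /* first failure: reclaim memory if possible, then retry once */
            if (!demo_collect_garbage() || (mem = malloc(size)) == NULL) {
                demo_out_of_memory();
            }
        }
        return mem;
    }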
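
A minimal sketch of the multiply-overflow guard used by xmalloc2_size() and ruby_xrealloc2() above: after multiplying, dividing the product back by one factor must reproduce the other factor, otherwise the multiplication wrapped around. demo_mul_size() is a hypothetical helper name, not gc.c API:

    /* sketch only; demo_mul_size() is a hypothetical name */
    #include <stddef.h>

    static int
    demo_mul_size(size_t n, size_t size, size_t *out)
    {
        size_t len = n * size;          /* may silently wrap on overflow */

        if (n != 0 && size != len / n) {
            return 0;                   /* overflow: n * size does not fit in size_t */
        }
        *out = len;
        return 1;                       /* product is exact */
    }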