path: root/gc.c
author     normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-05-18 08:40:16 +0000
committer  normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-05-18 08:40:16 +0000
commit     007f5e2e4b7c150b55152cddecfd5443ef7804f9 (patch)
tree       1ed0c7b2e41d900eca421148ee888c6c87ad73c3 /gc.c
parent     b8fa227e5858b3f8b78d53a8b91d8f064292d359 (diff)
download   ruby-007f5e2e4b7c150b55152cddecfd5443ef7804f9.tar.gz
Revert "gc.c: use monotonic counters for objspace_malloc_increase"
There were major size regressions I failed to notice before in:

  bm_array_sample_100k__6k
  bm_array_sample_100k___10k
  bm_array_sample_100k___50k

This reverts commit r63463 / 14fb10a9ec41c77a91a778ca2f705c1897958afb

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63464 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
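For reference, the sketch below (not part of the patch) contrasts the two accounting schemes touched by this revert: the reverted scheme kept two monotonically increasing counters (add/sub) and derived the increase from their difference, while the restored scheme keeps a single malloc_increase counter that is bumped on growth and decremented with an underflow guard on shrink. It approximates Ruby's ATOMIC_SIZE_* macros with C11 <stdatomic.h>; everything except the names visible in the diff below is illustrative.

/* Illustrative sketch only: approximates Ruby's ATOMIC_SIZE_* macros
 * (ruby_atomic.h) with C11 <stdatomic.h>. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Reverted scheme (r63463): two monotonically increasing counters,
 * increase = add - sub, clamped to zero on transient underflow. */
struct monoctr {
    atomic_size_t add;   /* bytes ever allocated */
    atomic_size_t sub;   /* bytes ever freed     */
};

static size_t
monoctr_read(struct monoctr *mc)
{
    size_t add  = atomic_load(&mc->add);
    size_t sub  = atomic_load(&mc->sub);
    size_t diff = add - sub;
    return (diff <= add) ? diff : 0;
}

/* Restored scheme: a single counter, never allowed to underflow. */
static atomic_size_t malloc_increase;

static void
atomic_sub_nounderflow(atomic_size_t *var, size_t sub)
{
    if (sub == 0) return;
    for (;;) {
        size_t val = atomic_load(var);
        if (val < sub) sub = val;                  /* clamp at zero */
        if (atomic_compare_exchange_weak(var, &val, val - sub)) break;
    }
}

/* cf. objspace_malloc_increase() in the hunks below */
static void
account_memop(size_t new_size, size_t old_size)
{
    if (new_size > old_size)
        atomic_fetch_add(&malloc_increase, new_size - old_size);
    else
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
}

int
main(void)
{
    struct monoctr m = {0};
    atomic_fetch_add(&m.add, 1024);   /* malloc of 1024 bytes */
    atomic_fetch_add(&m.sub, 256);    /* free of 256 bytes    */

    account_memop(1024, 0);           /* same traffic, single counter */
    account_memop(0, 256);

    printf("monoctr increase:        %zu\n", monoctr_read(&m));               /* 768 */
    printf("single-counter increase: %zu\n", (size_t)atomic_load(&malloc_increase)); /* 768 */
    return 0;
}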
Diffstat (limited to 'gc.c')
-rw-r--r--   gc.c   69
1 file changed, 20 insertions(+), 49 deletions(-)
diff --git a/gc.c b/gc.c
index be2a5c4b72..6ab41e6943 100644
--- a/gc.c
+++ b/gc.c
@@ -511,15 +511,10 @@ enum gc_mode {
gc_mode_sweeping
};
-struct monoctr {
- size_t add;
- size_t sub;
-};
-
typedef struct rb_objspace {
struct {
size_t limit;
- struct monoctr m;
+ size_t increase;
#if MALLOC_ALLOCATED_SIZE
size_t allocated_size;
size_t allocations;
@@ -742,6 +737,7 @@ static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_limit objspace->malloc_params.limit
+#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
@@ -5098,9 +5094,9 @@ gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
static void
gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
{
- struct monoctr saved_malloc = objspace->malloc_params.m;
+ size_t saved_malloc_increase = objspace->malloc_params.increase;
#if RGENGC_ESTIMATE_OLDMALLOC
- struct monoctr saved_oldmalloc = objspace->rgengc.oldmalloc;
+ size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
#endif
VALUE already_disabled = rb_gc_disable();
@@ -5121,9 +5117,9 @@ gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char
objspace->rgengc.allrefs_table = 0;
if (already_disabled == Qfalse) rb_gc_enable();
- objspace->malloc_params.m = saved_malloc;
+ objspace->malloc_params.increase = saved_malloc_increase;
#if RGENGC_ESTIMATE_OLDMALLOC
- objspace->rgengc.oldmalloc = saved_oldmalloc;
+ objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
#endif
}
#endif /* RGENGC_CHECK_MODE >= 4 */
@@ -6330,42 +6326,14 @@ ready_to_gc(rb_objspace_t *objspace)
}
}
-static size_t
-monoctr_read(const struct monoctr *mc)
-{
- size_t add = mc->add;
- size_t sub = mc->sub;
- size_t diff = add - sub;
-
- return (diff <= add) ? diff : 0;
-}
-
-static size_t
-monoctr_xchg0(struct monoctr *mc)
-{
- size_t add = ATOMIC_SIZE_EXCHANGE(mc->add, 0);
- size_t sub = ATOMIC_SIZE_EXCHANGE(mc->sub, 0);
- size_t diff = add - sub;
-
- return (diff <= add) ? diff : 0;
-}
-
-static size_t
-malloc_increase(const rb_objspace_t *objspace)
-{
- return monoctr_read(&objspace->malloc_params.m);
-}
-
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
gc_prof_set_malloc_info(objspace);
{
- size_t inc = monoctr_xchg0(&objspace->malloc_params.m);
+ size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
size_t old_limit = malloc_limit;
- objspace->rgengc.oldmalloc_increase += inc;
-
if (inc > malloc_limit) {
malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
if (gc_params.malloc_limit_max > 0 && /* ignore max-check if 0 */
@@ -7212,7 +7180,7 @@ gc_stat_internal(VALUE hash_or_sym)
SET(total_freed_pages, objspace->profile.total_freed_pages);
SET(total_allocated_objects, objspace->total_allocated_objects);
SET(total_freed_objects, objspace->profile.total_freed_objects);
- SET(malloc_increase_bytes, malloc_increase(objspace));
+ SET(malloc_increase_bytes, malloc_increase);
SET(malloc_increase_bytes_limit, malloc_limit);
#if USE_RGENGC
SET(minor_gc_count, objspace->profile.minor_gc_count);
@@ -7822,7 +7790,6 @@ enum memop_type {
MEMOP_TYPE_REALLOC = 3
};
-#if MALLOC_ALLOCATED_SIZE
static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
@@ -7834,7 +7801,6 @@ atomic_sub_nounderflow(size_t *var, size_t sub)
if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
}
}
-#endif
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
@@ -7847,17 +7813,22 @@ objspace_malloc_gc_stress(rb_objspace_t *objspace)
static void
objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
- /* n.b. these checks for non-zero get inlined */
- if (new_size) {
- ATOMIC_SIZE_ADD(objspace->malloc_params.m.add, new_size);
+ if (new_size > old_size) {
+ ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
+#if RGENGC_ESTIMATE_OLDMALLOC
+ ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
+#endif
}
- if (old_size) {
- ATOMIC_SIZE_ADD(objspace->malloc_params.m.sub, old_size);
+ else {
+ atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
+#if RGENGC_ESTIMATE_OLDMALLOC
+ atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
+#endif
}
if (type == MEMOP_TYPE_MALLOC) {
retry:
- if (malloc_increase(objspace) > malloc_limit && ruby_native_thread_p() && !dont_gc) {
+ if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
@@ -8839,7 +8810,7 @@ gc_prof_set_malloc_info(rb_objspace_t *objspace)
#if GC_PROFILE_MORE_DETAIL
if (gc_prof_enabled(objspace)) {
gc_profile_record *record = gc_prof_record(objspace);
- record->allocate_increase = malloc_increase(objspace);
+ record->allocate_increase = malloc_increase;
record->allocate_limit = malloc_limit;
}
#endif
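
How the single counter feeds back into GC scheduling can be read off the hunks above: the allocation path compares malloc_increase against malloc_limit (objspace_malloc_increase), and gc_reset_malloc_info swaps the counter to zero and grows the limit by gc_params.malloc_limit_growth_factor when it was exceeded. A minimal sketch of that loop, again using C11 atomics instead of Ruby's ATOMIC_SIZE_EXCHANGE, with assumed default values for the starting limit and growth factor:

/* Simplified sketch, not gc.c itself.  The 16 MiB start and 1.4 growth
 * factor are assumed defaults; the real code additionally caps the limit
 * at malloc_limit_max, can shrink it again, and checks for the GVL and
 * lazy sweeping before triggering GC. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static atomic_size_t malloc_increase;
static size_t        malloc_limit = 16 * 1024 * 1024;   /* assumed GC_MALLOC_LIMIT_MIN */
static const double  malloc_limit_growth_factor = 1.4;  /* assumed gc_params default   */

/* allocation path, cf. objspace_malloc_increase(): trigger GC once the
 * accumulated increase exceeds the current limit */
static bool
malloc_over_limit_p(void)
{
    return atomic_load(&malloc_increase) > malloc_limit;
}

/* GC path, cf. gc_reset_malloc_info(): consume the counter atomically and
 * raise the limit if it was exceeded */
static void
reset_malloc_info(void)
{
    size_t inc = atomic_exchange(&malloc_increase, 0);  /* ATOMIC_SIZE_EXCHANGE */
    if (inc > malloc_limit) {
        malloc_limit = (size_t)(inc * malloc_limit_growth_factor);
    }
}

int
main(void)
{
    atomic_fetch_add(&malloc_increase, (size_t)24 * 1024 * 1024);
    if (malloc_over_limit_p()) reset_malloc_info();     /* limit grows to ~33 MiB */
    return 0;
}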