path: root/iseq.h
author     tenderlove <tenderlove@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-03-09 20:11:45 +0000
committer  tenderlove <tenderlove@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-03-09 20:11:45 +0000
commit     4ba669da7f42b02691f5c4c9febea20e7a0895f2 (patch)
tree       ba13d7442ca20fdb723d5a4bd722692d2bfdfaa6 /iseq.h
parent     b2790709604d32203018cf7fe7f5f1b3b5befd99 (diff)
Add direct marking on iseq operands
Directly marking iseq operands allows us to eliminate the "mark array"
stored on ISEQ objects, which will reduce the amount of memory ISEQ
objects consume. This patch changes the iseq mark function to:

* Directly mark ISEQ operands
* Iterate over and mark child ISEQs

It also introduces two flags on the ISEQ object. In order to mark
instruction operands, we have to disassemble the instructions and find
the instruction parameters and types. Instructions may also be
translated to jump addresses, and instruction sequences may get marked
by the GC *while* they are mid flight (being compiled). The
`ISEQ_TRANSLATED` flag indicates whether the instructions have been
translated to jump addresses, so that when we decode the instructions
we know whether we need to map a jump address back to the original
instruction first.

Not all ISEQ objects have markable objects embedded in their
instructions, and we can detect at compile time whether an ISEQ does.
If the instructions contain markable objects, we set the
`ISEQ_MARKABLE_ISEQ` flag on the ISEQ object. During the mark phase we
can then skip decompilation if the flag is *not* set; in other words,
we avoid decompilation when we know in advance there is nothing to
mark.

`once` instructions have an operand that contains the result of a
one-time compilation of a regex. Before this patch, that operand was
called an "inline cache", even though the struct was actually an
"inline storage". This patch changes the operand to be an "inline
storage" so that we can differentiate between caches that need marking
(the inline storage) and caches that don't need marking (inline
cache).

[ruby-core:84909]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62706 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
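
For illustration, a minimal sketch of how a mark function might consult the two
new flags. This is not the iseq.c code from this patch: each_markable_operand()
is a hypothetical helper standing in for the real walk over
iseq->body->iseq_encoded, and only FL_TEST, rb_gc_mark, and the flag names come
from the actual sources.

/* Minimal sketch, not the actual iseq.c implementation from this patch.
 * Assumes the internal headers (vm_core.h / iseq.h) are included.
 * each_markable_operand() is a hypothetical helper that walks the encoded
 * instructions and passes every VALUE operand to mark_fn. */
static void each_markable_operand(const rb_iseq_t *iseq, int translated,
                                  void (*mark_fn)(VALUE));

static void
sketch_iseq_mark(const rb_iseq_t *iseq)
{
    /* Compile time already recorded that these instructions embed no
     * markable objects, so decoding them would be wasted work. */
    if (!FL_TEST((VALUE)iseq, ISEQ_MARKABLE_ISEQ)) return;

    /* ISEQ_TRANSLATED says whether operands were rewritten to jump
     * addresses; if so, the decoder must map each address back to the
     * original instruction before it can read the operand types. */
    int translated = FL_TEST((VALUE)iseq, ISEQ_TRANSLATED) ? 1 : 0;

    each_markable_operand(iseq, translated, rb_gc_mark);
}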
Diffstat (limited to 'iseq.h')
-rw-r--r--  iseq.h  38
1 file changed, 10 insertions(+), 28 deletions(-)
diff --git a/iseq.h b/iseq.h
index 0c316e2cb2..a39d827670 100644
--- a/iseq.h
+++ b/iseq.h
@@ -28,44 +28,25 @@ rb_call_info_kw_arg_bytes(int keyword_len)
return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}
-enum iseq_mark_ary_index {
- ISEQ_MARK_ARY_COVERAGE,
- ISEQ_MARK_ARY_FLIP_CNT,
- ISEQ_MARK_ARY_ORIGINAL_ISEQ,
- ISEQ_MARK_ARY_INITIAL_SIZE
-};
-
-static inline VALUE
-iseq_mark_ary_create(int flip_cnt)
-{
- VALUE ary = rb_ary_tmp_new(ISEQ_MARK_ARY_INITIAL_SIZE);
- rb_ary_push(ary, Qnil); /* ISEQ_MARK_ARY_COVERAGE */
- rb_ary_push(ary, INT2FIX(flip_cnt)); /* ISEQ_MARK_ARY_FLIP_CNT */
- rb_ary_push(ary, Qnil); /* ISEQ_MARK_ARY_ORIGINAL_ISEQ */
- return ary;
-}
-
-#define ISEQ_MARK_ARY(iseq) (iseq)->body->mark_ary
-
-#define ISEQ_COVERAGE(iseq) RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE)
-#define ISEQ_COVERAGE_SET(iseq, cov) RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE, cov)
+#define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
+#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)
-#define ISEQ_FLIP_CNT(iseq) FIX2INT(RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT))
+#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
static inline int
ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq)
{
- int cnt = ISEQ_FLIP_CNT(iseq);
- RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT, INT2FIX(cnt+1));
+ int cnt = iseq->body->variable.flip_count;
+ iseq->body->variable.flip_count += 1;
return cnt;
}
static inline VALUE *
ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
{
- VALUE str = RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ);
+ VALUE str = iseq->body->variable.original_iseq;
if (RTEST(str)) return (VALUE *)RSTRING_PTR(str);
return NULL;
}
@@ -73,14 +54,14 @@ ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
static inline void
ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
{
- RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, Qnil);
+ RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, Qnil);
}
static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
{
VALUE str = rb_str_tmp_new(size * sizeof(VALUE));
- RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, str);
+ RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, str);
return (VALUE *)RSTRING_PTR(str);
}
@@ -94,6 +75,8 @@ ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
#define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
+#define ISEQ_TRANSLATED IMEMO_FL_USER3
+#define ISEQ_MARKABLE_ISEQ IMEMO_FL_USER4
struct iseq_compile_data {
/* GC is needed */
@@ -173,7 +156,6 @@ void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc,
VALUE exception, VALUE body);
/* iseq.c */
-void rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj);
VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
struct st_table *ruby_insn_make_insn_table(void);
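
A side effect of moving coverage, the flip count, and the original iseq off the
mark array and onto iseq->body->variable is that these stores no longer pass
through the array's write barrier, which is why the new setters above use
RB_OBJ_WRITE. A hedged sketch of the idea follows; the wrapper function is
illustrative only, and just the macro and the field name come from the diff.

/* Illustrative only: writing a GC-managed VALUE directly into the iseq body
 * goes through RB_OBJ_WRITE so the generational GC records the reference
 * from the (possibly old) iseq to the (possibly young) coverage array.
 * Assumes vm_core.h is included for rb_iseq_t. */
static void
sketch_set_coverage(const rb_iseq_t *iseq, VALUE cov)
{
    RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov);
}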