author     Aaron Patterson <tenderlove@ruby-lang.org>  2019-09-04 09:38:17 -0700
committer  Aaron Patterson <tenderlove@ruby-lang.org>  2019-09-05 10:13:50 -0700
commit     545b6db3fb367944f72cee5d41892eed63574634
tree       c4fa68600e1d00cfdc23b082a8435b78e1c493f0
parent     f0fd1c0cd8d34b870a3011a36f5179d1b5f3547d
Create two buckets for allocating NODE structs
This commit adds two buckets for allocating NODE structs and allocates "markable" NODE objects from one of them. The point of the split is that when the AST mark function scans nodes for VALUE objects to mark, it only has to scan the NODE objects that are known to reference VALUEs. Without the split, the mark function spends too much time scanning objects that don't contain any references.
-rw-r--r--  node.c   81
-rw-r--r--  node.h    2
-rw-r--r--  parse.y   2
3 files changed, 67 insertions(+), 18 deletions(-)
diff --git a/node.c b/node.c
index 064ce006ad..8dff1d200d 100644
--- a/node.c
+++ b/node.c
@@ -1120,28 +1120,41 @@ typedef struct node_buffer_elem_struct {
NODE buf[FLEX_ARY_LEN];
} node_buffer_elem_t;
-struct node_buffer_struct {
+typedef struct {
long idx, len;
node_buffer_elem_t *head;
node_buffer_elem_t *last;
+} node_buffer_list_t;
+
+struct node_buffer_struct {
+ node_buffer_list_t unmarkable;
+ node_buffer_list_t markable;
VALUE mark_ary;
};
-static node_buffer_t *
-rb_node_buffer_new(void)
+static void
+init_node_buffer_list(node_buffer_list_t * nb, node_buffer_elem_t *head)
{
- node_buffer_t *nb = xmalloc(sizeof(node_buffer_t) + offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE));
nb->idx = 0;
nb->len = NODE_BUF_DEFAULT_LEN;
- nb->head = nb->last = (node_buffer_elem_t*) &nb[1];
+ nb->head = nb->last = head;
nb->head->len = nb->len;
nb->head->next = NULL;
+}
+
+static node_buffer_t *
+rb_node_buffer_new(void)
+{
+ size_t bucket_size = offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE);
+ node_buffer_t *nb = xmalloc(sizeof(node_buffer_t) + (bucket_size * 2));
+ init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
+ init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
nb->mark_ary = rb_ary_tmp_new(0);
return nb;
}
static void
-rb_node_buffer_free(node_buffer_t *nb)
+node_buffer_list_free(node_buffer_list_t * nb)
{
node_buffer_elem_t *nbe = nb->head;
@@ -1150,13 +1163,19 @@ rb_node_buffer_free(node_buffer_t *nb)
nbe = nbe->next;
xfree(buf);
}
+}
+
+static void
+rb_node_buffer_free(node_buffer_t *nb)
+{
+ node_buffer_list_free(&nb->unmarkable);
+ node_buffer_list_free(&nb->markable);
xfree(nb);
}
-NODE *
-rb_ast_newnode(rb_ast_t *ast)
+static NODE *
+ast_newnode_in_bucket(node_buffer_list_t *nb)
{
- node_buffer_t *nb = ast->node_buffer;
if (nb->idx >= nb->len) {
long n = nb->len * 2;
node_buffer_elem_t *nbe;
@@ -1170,6 +1189,27 @@ rb_ast_newnode(rb_ast_t *ast)
return &nb->head->buf[nb->idx++];
}
+NODE *
+rb_ast_newnode(rb_ast_t *ast, enum node_type type)
+{
+ node_buffer_t *nb = ast->node_buffer;
+ switch (type) {
+ case NODE_LIT:
+ case NODE_STR:
+ case NODE_XSTR:
+ case NODE_DSTR:
+ case NODE_DXSTR:
+ case NODE_DREGX:
+ case NODE_DSYM:
+ case NODE_ARGS:
+ case NODE_SCOPE:
+ case NODE_ARYPTN:
+ return ast_newnode_in_bucket(&nb->markable);
+ default:
+ return ast_newnode_in_bucket(&nb->unmarkable);
+ }
+}
+
void
rb_ast_delete_node(rb_ast_t *ast, NODE *n)
{
@@ -1200,7 +1240,7 @@ iterate_buffer_elements(node_buffer_elem_t *nbe, long len, node_itr_t *func, void *ctx)
}
static void
-iterate_node_values(node_buffer_t *nb, node_itr_t * func, void *ctx)
+iterate_node_values(node_buffer_list_t *nb, node_itr_t * func, void *ctx)
{
node_buffer_elem_t *nbe = nb->head;
@@ -1249,7 +1289,7 @@ rb_ast_mark(rb_ast_t *ast)
if (ast->node_buffer) {
node_buffer_t *nb = ast->node_buffer;
- iterate_node_values(nb, mark_ast_value, NULL);
+ iterate_node_values(&nb->markable, mark_ast_value, NULL);
}
}
@@ -1262,6 +1302,18 @@ rb_ast_free(rb_ast_t *ast)
}
}
+static size_t
+buffer_list_size(node_buffer_list_t *nb)
+{
+ size_t size = 0;
+ node_buffer_elem_t *nbe = nb->head;
+ while (nbe != nb->last) {
+ nbe = nbe->next;
+ size += offsetof(node_buffer_elem_t, buf) + nb->len * sizeof(NODE);
+ }
+ return size;
+}
+
size_t
rb_ast_memsize(const rb_ast_t *ast)
{
@@ -1270,11 +1322,8 @@ rb_ast_memsize(const rb_ast_t *ast)
if (nb) {
size += sizeof(node_buffer_t) + offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE);
- node_buffer_elem_t *nbe = nb->head;
- while (nbe != nb->last) {
- nbe = nbe->next;
- size += offsetof(node_buffer_elem_t, buf) + nb->len * sizeof(NODE);
- }
+ size += buffer_list_size(&nb->unmarkable);
+ size += buffer_list_size(&nb->markable);
}
return size;
}
diff --git a/node.h b/node.h
index dbc3162512..4e78b0fff3 100644
--- a/node.h
+++ b/node.h
@@ -409,7 +409,7 @@ void rb_ast_dispose(rb_ast_t*);
void rb_ast_free(rb_ast_t*);
size_t rb_ast_memsize(const rb_ast_t*);
void rb_ast_add_mark_object(rb_ast_t*, VALUE);
-NODE *rb_ast_newnode(rb_ast_t*);
+NODE *rb_ast_newnode(rb_ast_t*, enum node_type type);
void rb_ast_delete_node(rb_ast_t*, NODE *n);
VALUE rb_parser_new(void);
diff --git a/parse.y b/parse.y
index b230ddb183..811c668c4a 100644
--- a/parse.y
+++ b/parse.y
@@ -9357,7 +9357,7 @@ yylex(YYSTYPE *lval, YYLTYPE *yylloc, struct parser_params *p)
static NODE*
node_newnode(struct parser_params *p, enum node_type type, VALUE a0, VALUE a1, VALUE a2, const rb_code_location_t *loc)
{
- NODE *n = rb_ast_newnode(p->ast);
+ NODE *n = rb_ast_newnode(p->ast, type);
rb_node_init(n, type, a0, a1, a2);