author    Jean Boussier <byroot@ruby-lang.org>  2023-11-10 10:21:28 +0100
committer Jean Boussier <jean.boussier@gmail.com>  2023-11-10 15:56:22 +0100
commit    a2442e91fded557fe79a49a86b84565ee66e34cc (patch)
tree      4ae6f839d8a8e81acee712986f0206bd1ae9757e /vm_backtrace.c
parent    1ee69688553270b2a0a5d834d06615089effe02a (diff)
Embed Backtrace::Location objects
The struct is 16B, so these objects end up in the 80B size pool; on paper that wastes 80 - 32 - 16 = 32B. However, most malloc implementations either pad allocation sizes or add an extra 16B of bookkeeping per chunk, so in practice the waste isn't that big.

Also, `Backtrace::Location` objects are rarely held onto for long, so avoiding the malloc churn helps performance.

Co-Authored-By: Étienne Barrié <etienne.barrie@gmail.com>
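A rough sketch of that slot arithmetic, assuming the embedded 16B struct sits right after a 32B object header (the header size here is an illustrative assumption, not something stated by the patch):

/* Sketch of the slot-size arithmetic described above. */
#include <stdio.h>

int main(void)
{
    size_t header  = 32;   /* assumed RTypedData header size */
    size_t payload = 16;   /* sizeof(struct valued_frame_info) */
    size_t slot    = 80;   /* smallest GC size pool fitting 32 + 16 = 48B */
    printf("embedded: %zuB, slack: %zuB\n",
           header + payload, slot - header - payload);   /* 48B and 32B */
    return 0;
}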
Diffstat (limited to 'vm_backtrace.c')
-rw-r--r--  vm_backtrace.c  |  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/vm_backtrace.c b/vm_backtrace.c
index 2414adae6e..551fdad0d1 100644
--- a/vm_backtrace.c
+++ b/vm_backtrace.c
@@ -162,14 +162,13 @@ location_mark_entry(rb_backtrace_location_t *fi)
 static size_t
 location_memsize(const void *ptr)
 {
-    /* rb_backtrace_location_t *fi = (rb_backtrace_location_t *)ptr; */
-    return sizeof(rb_backtrace_location_t);
+    return 0;
 }
 
 static const rb_data_type_t location_data_type = {
     "frame_info",
     {location_mark, RUBY_TYPED_DEFAULT_FREE, location_memsize,},
-    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 int
@@ -182,7 +181,7 @@ static inline rb_backtrace_location_t *
 location_ptr(VALUE locobj)
 {
     struct valued_frame_info *vloc;
-    GetCoreDataFromValue(locobj, struct valued_frame_info, vloc);
+    TypedData_Get_Struct(locobj, struct valued_frame_info, &location_data_type, vloc);
     return vloc->loc;
 }
 
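For context, a minimal sketch of the embeddable TypedData pattern the patch adopts, written against the public C API (assumed Ruby 3.3+). The `Point` type and every name in it are hypothetical; only the flags and accessor macros mirror the diff above.

/* Hypothetical extension type, for illustration only. */
#include "ruby.h"

struct point {
    long x, y;   /* 16B payload, comparable to struct valued_frame_info */
};

static const rb_data_type_t point_data_type = {
    "point",
    {0, RUBY_TYPED_DEFAULT_FREE, 0,},   /* nothing to mark, no extra memsize */
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};

static VALUE
point_alloc(VALUE klass)
{
    struct point *p;
    /* With RUBY_TYPED_EMBEDDABLE, the struct can live inside the object
     * slot when it fits, so no separate malloc chunk is needed. */
    return TypedData_Make_Struct(klass, struct point, &point_data_type, p);
}

static VALUE
point_x(VALUE self)
{
    struct point *p;
    /* TypedData_Get_Struct resolves both the embedded and the
     * pointer-based layout, unlike a raw data-pointer access. */
    TypedData_Get_Struct(self, struct point, &point_data_type, p);
    return LONG2NUM(p->x);
}

void
Init_point(void)
{
    VALUE cPoint = rb_define_class("Point", rb_cObject);
    rb_define_alloc_func(cPoint, point_alloc);
    rb_define_method(cPoint, "x", point_x, 0);
}

Because embedded data no longer sits behind a plain data pointer, accessors have to go through the typed-data macros; that is why location_ptr above switches from GetCoreDataFromValue to TypedData_Get_Struct, and why location_memsize now reports 0 (the embedded struct is already accounted for by the object slot itself).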