aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--array.c6
-rw-r--r--compile.c10
-rw-r--r--enumerator.c6
-rw-r--r--gc.c24
-rw-r--r--hash.c2
-rw-r--r--include/ruby/internal/core/rbignum.h6
-rw-r--r--io.c9
-rw-r--r--memory_view.c6
-rw-r--r--mjit_worker.c2
-rw-r--r--object.c2
-rw-r--r--parse.y2
-rw-r--r--proc.c3
-rw-r--r--scheduler.c3
-rw-r--r--st.c8
-rw-r--r--string.c5
-rw-r--r--thread.c15
-rw-r--r--thread_sync.c10
-rw-r--r--variable.c3
-rw-r--r--vm.c3
-rw-r--r--vm_backtrace.c17
-rw-r--r--vm_method.c5
-rw-r--r--vm_trace.c2
22 files changed, 93 insertions(+), 56 deletions(-)
diff --git a/array.c b/array.c
index 83e4d186bd..a1e0c13659 100644
--- a/array.c
+++ b/array.c
@@ -3074,11 +3074,13 @@ ary_rotate_ptr(VALUE *ptr, long len, long cnt)
VALUE tmp = *ptr;
memmove(ptr, ptr + 1, sizeof(VALUE)*(len - 1));
*(ptr + len - 1) = tmp;
- } else if (cnt == len - 1) {
+ }
+ else if (cnt == len - 1) {
VALUE tmp = *(ptr + len - 1);
memmove(ptr + 1, ptr, sizeof(VALUE)*(len - 1));
*ptr = tmp;
- } else {
+ }
+ else {
--len;
if (cnt < len) ary_reverse(ptr + cnt, ptr + len);
if (--cnt > 0) ary_reverse(ptr, ptr + cnt);
diff --git a/compile.c b/compile.c
index 3139e9807f..c58a6c204a 100644
--- a/compile.c
+++ b/compile.c
@@ -2155,7 +2155,8 @@ fix_sp_depth(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
}
if (lobj->sp == -1) {
lobj->sp = sp;
- } else if (lobj->sp != sp) {
+ }
+ else if (lobj->sp != sp) {
debugs("%s:%d: sp inconsistency found but ignored (" LABEL_FORMAT " sp: %d, calculated sp: %d)\n",
RSTRING_PTR(rb_iseq_path(iseq)), line,
lobj->label_no, lobj->sp, sp);
@@ -4997,7 +4998,7 @@ compile_massign(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
struct masgn_attrasgn *memo = state.first_memo, *tmp_memo;
while (memo) {
VALUE topn_arg = INT2FIX((state.num_args - memo->argn) + memo->lhs_pos);
- for(int i = 0; i < memo->num_args; i++) {
+ for (int i = 0; i < memo->num_args; i++) {
INSERT_BEFORE_INSN1(memo->before_insn, memo->line_node, topn, topn_arg);
}
tmp_memo = memo->next;
@@ -5211,7 +5212,7 @@ defined_expr0(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
}
if (explicit_receiver) {
defined_expr0(iseq, ret, node->nd_recv, lfinish, Qfalse, true);
- switch(nd_type(node->nd_recv)) {
+ switch (nd_type(node->nd_recv)) {
case NODE_CALL:
case NODE_OPCALL:
case NODE_VCALL:
@@ -11696,7 +11697,8 @@ ibf_load_object_string(const struct ibf_load *load, const struct ibf_object_head
VALUE str;
if (header->frozen && !header->internal) {
str = rb_enc_interned_str(ptr, len, rb_enc_from_index(encindex));
- } else {
+ }
+ else {
str = rb_enc_str_new(ptr, len, rb_enc_from_index(encindex));
if (header->internal) rb_obj_hide(str);
diff --git a/enumerator.c b/enumerator.c
index fac83b4360..9c91e999b8 100644
--- a/enumerator.c
+++ b/enumerator.c
@@ -2701,7 +2701,8 @@ lazy_with_index_proc(VALUE proc_entry, struct MEMO* result, VALUE memos, long me
}
static VALUE
-lazy_with_index_size(VALUE proc, VALUE receiver) {
+lazy_with_index_size(VALUE proc, VALUE receiver)
+{
return receiver;
}
@@ -3144,7 +3145,8 @@ enum_chain_initialize(VALUE obj, VALUE enums)
}
static VALUE
-new_enum_chain(VALUE enums) {
+new_enum_chain(VALUE enums)
+{
long i;
VALUE obj = enum_chain_initialize(enum_chain_allocate(rb_cEnumChain), enums);
diff --git a/gc.c b/gc.c
index 8ea6f72c6c..cdf584678a 100644
--- a/gc.c
+++ b/gc.c
@@ -2289,7 +2289,7 @@ rvargc_find_contiguous_slots(int slots, RVALUE *freelist)
RVALUE *cursor = freelist;
RVALUE *previous_region = NULL;
- while(cursor) {
+ while (cursor) {
int i;
RVALUE *search = cursor;
for (i = 0; i < (slots - 1); i++) {
@@ -3551,7 +3551,7 @@ objspace_each_objects_try(VALUE arg)
while (cursor_end < pend) {
int payload_len = 0;
- while(cursor_end < pend && BUILTIN_TYPE((VALUE)cursor_end) != T_PAYLOAD) {
+ while (cursor_end < pend && BUILTIN_TYPE((VALUE)cursor_end) != T_PAYLOAD) {
cursor_end++;
}
@@ -4869,7 +4869,7 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
#else
- if(mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
+ if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
#endif
rb_bug("Couldn't protect page %p", (void *)body);
}
@@ -4886,7 +4886,7 @@ unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
#else
- if(mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
+ if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
#endif
rb_bug("Couldn't unprotect page %p", (void *)body);
}
@@ -4944,7 +4944,7 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
* T_NONE, it is an object that just got freed but hasn't been
* added to the freelist yet */
- while(1) {
+ while (1) {
size_t index;
bits_t *mark_bits = cursor->mark_bits;
@@ -4955,7 +4955,8 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
index = BITMAP_INDEX(heap->compact_cursor_index);
p = heap->compact_cursor_index;
GC_ASSERT(cursor == GET_HEAP_PAGE(p));
- } else {
+ }
+ else {
index = 0;
p = cursor->start;
}
@@ -4967,7 +4968,8 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
if (index == 0) {
p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
- } else {
+ }
+ else {
p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
}
@@ -5010,7 +5012,7 @@ gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
{
struct heap_page *cursor = heap->compact_cursor;
- while(cursor) {
+ while (cursor) {
unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
cursor = list_next(&heap->pages, cursor, page_node);
}
@@ -5227,7 +5229,7 @@ gc_fill_swept_page_plane(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, b
/* Zombie slots don't get marked, but we can't reuse
* their memory until they have their finalizers run.*/
if (BUILTIN_TYPE(dest) != T_ZOMBIE) {
- if(!try_move(objspace, heap, sweep_page, dest)) {
+ if (!try_move(objspace, heap, sweep_page, dest)) {
*finished_compacting = true;
(void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
@@ -10059,11 +10061,11 @@ gc_compact_stats(rb_execution_context_t *ec, VALUE self)
VALUE moved = rb_hash_new();
for (i=0; i<T_MASK; i++) {
- if(objspace->rcompactor.considered_count_table[i]) {
+ if (objspace->rcompactor.considered_count_table[i]) {
rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
}
- if(objspace->rcompactor.moved_count_table[i]) {
+ if (objspace->rcompactor.moved_count_table[i]) {
rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
}
}
diff --git a/hash.c b/hash.c
index 88e0771f51..375342a89a 100644
--- a/hash.c
+++ b/hash.c
@@ -6510,7 +6510,7 @@ env_clone(int argc, VALUE *argv, VALUE obj)
rb_scan_args(argc, argv, "0:", &opt);
if (!NIL_P(opt)) {
rb_get_kwargs(opt, keyword_ids, 0, 1, &kwfreeze);
- switch(kwfreeze) {
+ switch (kwfreeze) {
case Qtrue:
rb_raise(rb_eTypeError, "cannot freeze ENV");
break;
diff --git a/include/ruby/internal/core/rbignum.h b/include/ruby/internal/core/rbignum.h
index 89db566501..3cd7d19850 100644
--- a/include/ruby/internal/core/rbignum.h
+++ b/include/ruby/internal/core/rbignum.h
@@ -37,13 +37,15 @@ int rb_big_sign(VALUE num);
RBIMPL_SYMBOL_EXPORT_END()
static inline bool
-RBIGNUM_POSITIVE_P(VALUE b) {
+RBIGNUM_POSITIVE_P(VALUE b)
+{
RBIMPL_ASSERT_TYPE(b, RUBY_T_BIGNUM);
return RBIGNUM_SIGN(b);
}
static inline bool
-RBIGNUM_NEGATIVE_P(VALUE b) {
+RBIGNUM_NEGATIVE_P(VALUE b)
+{
RBIMPL_ASSERT_TYPE(b, RUBY_T_BIGNUM);
return ! RBIGNUM_POSITIVE_P(b);
}
diff --git a/io.c b/io.c
index 63b024013e..a0a5389c1b 100644
--- a/io.c
+++ b/io.c
@@ -1289,7 +1289,8 @@ rb_io_wait(VALUE io, VALUE events, VALUE timeout)
if (ready > 0) {
return RB_INT2NUM(ready);
- } else {
+ }
+ else {
return Qfalse;
}
}
@@ -1323,7 +1324,8 @@ rb_io_wait_readable(int f)
return RTEST(
rb_fiber_scheduler_io_wait_readable(scheduler, rb_io_from_fd(f))
);
- } else {
+ }
+ else {
rb_thread_wait_fd(f);
}
return TRUE;
@@ -1365,7 +1367,8 @@ rb_io_wait_writable(int f)
return RTEST(
rb_fiber_scheduler_io_wait_writable(scheduler, rb_io_from_fd(f))
);
- } else {
+ }
+ else {
rb_thread_fd_writable(f);
}
return TRUE;
diff --git a/memory_view.c b/memory_view.c
index 9609ecae3f..6e422c1476 100644
--- a/memory_view.c
+++ b/memory_view.c
@@ -128,7 +128,8 @@ static const rb_data_type_t memory_view_entry_data_type = {
/* Register memory view functions for the given class */
bool
-rb_memory_view_register(VALUE klass, const rb_memory_view_entry_t *entry) {
+rb_memory_view_register(VALUE klass, const rb_memory_view_entry_t *entry)
+{
Check_Type(klass, T_CLASS);
VALUE entry_obj = rb_ivar_lookup(klass, id_memory_view, Qnil);
if (! NIL_P(entry_obj)) {
@@ -372,7 +373,8 @@ get_format_size(const char *format, bool *native_p, ssize_t *alignment, endianne
}
static inline ssize_t
-calculate_padding(ssize_t total, ssize_t alignment_size) {
+calculate_padding(ssize_t total, ssize_t alignment_size)
+{
if (alignment_size > 1) {
ssize_t res = total % alignment_size;
if (res > 0) {
diff --git a/mjit_worker.c b/mjit_worker.c
index 046d3a9faf..c6528a9bda 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -1142,7 +1142,7 @@ compile_prelude(FILE *f)
fprintf(f, "#include \"");
// print pch_file except .gch for gcc, but keep .pch for mswin
for (; s < e; s++) {
- switch(*s) {
+ switch (*s) {
case '\\': case '"':
fputc('\\', f);
}
diff --git a/object.c b/object.c
index 887d3b560e..244f2a6bba 100644
--- a/object.c
+++ b/object.c
@@ -393,7 +393,7 @@ special_object_p(VALUE obj)
static VALUE
obj_freeze_opt(VALUE freeze)
{
- switch(freeze) {
+ switch (freeze) {
case Qfalse:
case Qtrue:
case Qnil:
diff --git a/parse.y b/parse.y
index 0790641f44..6b42b6b31b 100644
--- a/parse.y
+++ b/parse.y
@@ -7145,7 +7145,7 @@ tokadd_string(struct parser_params *p,
int i;
char escbuf[5];
snprintf(escbuf, sizeof(escbuf), "\\x%02X", c);
- for(i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++) {
tokadd(p, escbuf[i]);
}
continue;
diff --git a/proc.c b/proc.c
index 390c67424e..f9e04ce4a1 100644
--- a/proc.c
+++ b/proc.c
@@ -2635,7 +2635,8 @@ umethod_bind_call(int argc, VALUE *argv, VALUE method)
if (data->me == (const rb_method_entry_t *)cme) {
vm_passed_block_handler_set(ec, proc_to_block_handler(passed_procval));
return rb_vm_call_kw(ec, recv, cme->called_id, argc, argv, cme, RB_PASS_CALLED_KEYWORDS);
- } else {
+ }
+ else {
VALUE methclass, klass, iclass;
const rb_method_entry_t *me;
convert_umethod_to_method_components(data, recv, &methclass, &klass, &iclass, &me);
diff --git a/scheduler.c b/scheduler.c
index 1531bd0e35..ca457b0d14 100644
--- a/scheduler.c
+++ b/scheduler.c
@@ -102,7 +102,8 @@ rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
if (thread->blocking == 0) {
return thread->scheduler;
- } else {
+ }
+ else {
return Qnil;
}
}
diff --git a/st.c b/st.c
index 9919f0a734..53e9dc8320 100644
--- a/st.c
+++ b/st.c
@@ -841,7 +841,8 @@ find_table_entry_ind(st_table *tab, st_hash_t hash_value, st_data_t key)
return REBUILT_TABLE_ENTRY_IND;
if (eq_p)
break;
- } else if (EMPTY_BIN_P(bin))
+ }
+ else if (EMPTY_BIN_P(bin))
return UNDEFINED_ENTRY_IND;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
@@ -886,7 +887,8 @@ find_table_bin_ind(st_table *tab, st_hash_t hash_value, st_data_t key)
return REBUILT_TABLE_BIN_IND;
if (eq_p)
break;
- } else if (EMPTY_BIN_P(bin))
+ }
+ else if (EMPTY_BIN_P(bin))
return UNDEFINED_BIN_IND;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
@@ -2120,7 +2122,7 @@ st_rehash_indexed(st_table *tab)
continue;
ind = hash_bin(p->hash, tab);
- for(;;) {
+ for (;;) {
st_index_t bin = get_bin(bins, size_ind, ind);
if (EMPTY_OR_DELETED_BIN_P(bin)) {
/* ok, new room */
diff --git a/string.c b/string.c
index 145a153096..d763120095 100644
--- a/string.c
+++ b/string.c
@@ -3718,7 +3718,8 @@ rb_str_index_m(int argc, VALUE *argv, VALUE str)
if (rb_reg_search(sub, str, pos, 0) < 0) {
return Qnil;
- } else {
+ }
+ else {
VALUE match = rb_backref_get();
struct re_registers *regs = RMATCH_REGS(match);
pos = rb_str_sublen(str, BEG(0));
@@ -10132,7 +10133,7 @@ rb_str_rpartition(VALUE str, VALUE sep)
else {
pos = rb_str_sublen(str, pos);
pos = rb_str_rindex(str, sep, pos);
- if(pos < 0) {
+ if (pos < 0) {
goto failed;
}
pos = rb_str_offset(str, pos);
diff --git a/thread.c b/thread.c
index 0422b23b48..1511977d4d 100644
--- a/thread.c
+++ b/thread.c
@@ -549,7 +549,8 @@ rb_threadptr_join_list_wakeup(rb_thread_t *thread)
if (target_thread->scheduler != Qnil && rb_fiberptr_blocking(join_list->fiber) == 0) {
rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
- } else {
+ }
+ else {
rb_threadptr_interrupt(target_thread);
switch (target_thread->status) {
@@ -820,7 +821,8 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
SAVE_ROOT_JMPBUF(th, thread_do_start(th));
- } else {
+ }
+ else {
errinfo = th->ec->errinfo;
if (state == TAG_FATAL) {
@@ -1182,7 +1184,8 @@ thread_join_sleep(VALUE arg)
if (scheduler != Qnil) {
rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
- } else if (!limit) {
+ }
+ else if (!limit) {
th->status = THREAD_STOPPED_FOREVER;
rb_ractor_sleeper_threads_inc(th->ractor);
rb_check_deadlock(th->ractor);
@@ -1528,7 +1531,8 @@ rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
VALUE scheduler = rb_fiber_scheduler_current();
if (scheduler != Qnil) {
rb_fiber_scheduler_block(scheduler, blocker, Qnil);
- } else {
+ }
+ else {
thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
}
@@ -4276,7 +4280,8 @@ do_select(VALUE p)
if (result > 0 && rb_fd_isset(set->sigwait_fd, set->rset)) {
result--;
(void)check_signals_nogvl(set->th, set->sigwait_fd);
- } else {
+ }
+ else {
(void)check_signals_nogvl(set->th, -1);
}
}
diff --git a/thread_sync.c b/thread_sync.c
index 76bf57e4dd..c6183ca438 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -195,7 +195,8 @@ rb_mutex_locked_p(VALUE self)
}
static void
-thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex) {
+thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
+{
if (thread->keeping_mutexes) {
mutex->next_mutex = thread->keeping_mutexes;
}
@@ -204,7 +205,8 @@ thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex) {
}
static void
-thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex) {
+thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
+{
rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
while (*keeping_mutexes && *keeping_mutexes != mutex) {
@@ -268,7 +270,9 @@ mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
}
}
-static VALUE call_rb_fiber_scheduler_block(VALUE mutex) {
+static VALUE
+call_rb_fiber_scheduler_block(VALUE mutex)
+{
return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
}
diff --git a/variable.c b/variable.c
index e5642beca2..5e5f5c4bf4 100644
--- a/variable.c
+++ b/variable.c
@@ -1422,7 +1422,8 @@ init_iv_list(VALUE obj, uint32_t len, uint32_t newsize, st_table *index_tbl)
MEMCPY(newptr, ptr, VALUE, len);
RBASIC(obj)->flags &= ~ROBJECT_EMBED;
ROBJECT(obj)->as.heap.ivptr = newptr;
- } else {
+ }
+ else {
newptr = obj_ivar_heap_realloc(obj, len, newsize);
}
diff --git a/vm.c b/vm.c
index ea8b21e7a5..01afea9d4d 100644
--- a/vm.c
+++ b/vm.c
@@ -1438,7 +1438,8 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
}
if (RHASH_EMPTY_P(keyword_hash)) {
argc--;
- } else {
+ }
+ else {
((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
}
}
diff --git a/vm_backtrace.c b/vm_backtrace.c
index 237b010408..19fb8f1c41 100644
--- a/vm_backtrace.c
+++ b/vm_backtrace.c
@@ -544,7 +544,7 @@ backtrace_each(const rb_execution_context_t *ec,
}
else {
/* Ensure we don't look at frames beyond the ones requested */
- for(; from_last > 0 && start_cfp >= last_cfp; from_last--) {
+ for (; from_last > 0 && start_cfp >= last_cfp; from_last--) {
last_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(last_cfp);
}
@@ -610,7 +610,8 @@ backtrace_each(const rb_execution_context_t *ec,
ignored_frames++;
}
}
- } else {
+ }
+ else {
/* No ignored frames before start frame, just decrement start */
start -= ignored_frames;
}
@@ -629,7 +630,8 @@ backtrace_each(const rb_execution_context_t *ec,
if (cfp->iseq) {
if (cfp->pc) {
iter_iseq(arg, cfp);
- } else {
+ }
+ else {
i--;
}
}
@@ -697,12 +699,14 @@ bt_iter_iseq_skip_internal(void *ptr, const rb_control_frame_t *cfp)
loc->body.iseq.iseq = cfp->iseq;
loc->body.iseq.lineno.pc = cfp->pc;
arg->prev_loc = loc;
- } else if (arg->prev_cfp) {
+ }
+ else if (arg->prev_cfp) {
loc->type = LOCATION_TYPE_ISEQ;
loc->body.iseq.iseq = arg->prev_cfp->iseq;
loc->body.iseq.lineno.pc = arg->prev_cfp->pc;
arg->prev_loc = loc;
- } else {
+ }
+ else {
rb_bug("No non-internal backtrace entry before an <internal: backtrace entry");
}
}
@@ -724,7 +728,8 @@ bt_iter_cfunc(void *ptr, const rb_control_frame_t *cfp, ID mid)
arg->init_loc->body.iseq.iseq = iseq;
arg->init_loc->body.iseq.lineno.pc = pc;
loc->body.cfunc.prev_loc = arg->prev_loc = arg->init_loc;
- } else {
+ }
+ else {
loc->body.cfunc.prev_loc = NULL;
}
}
diff --git a/vm_method.c b/vm_method.c
index ea7d2d4daf..182d42dc1a 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -518,7 +518,7 @@ method_definition_reset(const rb_method_entry_t *me)
{
rb_method_definition_t *def = me->def;
- switch(def->type) {
+ switch (def->type) {
case VM_METHOD_TYPE_ISEQ:
RB_OBJ_WRITTEN(me, Qundef, def->body.iseq.iseqptr);
RB_OBJ_WRITTEN(me, Qundef, def->body.iseq.cref);
@@ -2048,7 +2048,8 @@ set_method_visibility(VALUE self, int argc, const VALUE *argv, rb_method_visibil
for (j = 0; j < RARRAY_LEN(v); j++) {
check_and_export_method(self, RARRAY_AREF(v, j), visi);
}
- } else {
+ }
+ else {
for (i = 0; i < argc; i++) {
check_and_export_method(self, argv[i], visi);
}
diff --git a/vm_trace.c b/vm_trace.c
index 6e2c058779..457bdb0cad 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -859,7 +859,7 @@ fill_id_and_klass(rb_trace_arg_t *trace_arg)
VALUE
rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
{
- switch(trace_arg->event) {
+ switch (trace_arg->event) {
case RUBY_EVENT_CALL:
case RUBY_EVENT_RETURN:
case RUBY_EVENT_B_CALL: