Ruby 3.5.0dev (2025-08-02 revision 30a20bc166bc37acd7dcb3788686df149c7f428a)
vm_insnhelper.c (30a20bc166bc37acd7dcb3788686df149c7f428a)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
            );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
      /*                           BLK    ME     CREF   CFRAME */
      CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent an infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting such large contents.  It
     * is designed to run from a SIGSEGV handler, which tends to be
     * very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

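/* vm_push_frame() writes one control frame and its environment part onto
 * the VM stack.  Right above the frame's locals it pushes three values
 * (cf. the STATIC_ASSERTs above):
 *   ep[-2]: cref or callable method entry (or Qfalse)
 *   ep[-1]: specval -- block handler or previous EP
 *   ep[-0]: frame type and environment flags
 * See vm_core.h for the authoritative layout. */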
static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i = 0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with
       the update of `ec->cfp` below.  This is a no-op with every compiler
       we have checked (https://godbolt.org/z/3oxd1446K), but it guarantees
       the ordering on future/untested compilers and platforms. */

#if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
#endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

// Push a dummy pseudo-frame for the given file name (fname).
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* forcibly remember the env value */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}

/* svar */
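/* "svar" (special variable) storage holds per-frame values such as $~
 * (backref) and $_ (lastline).  As lep_svar() below shows, it lives in the
 * ME/CREF slot of the local EP, or in ec->root_svar for the root frame. */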

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

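/* For vm_getspecial(), key selects a plain svar slot ($_, $~, extras).
 * When type is non-zero and its low bit is set, type >> 1 is a back-ref
 * character ('&', '`', '\'', '+'); with the low bit clear, type >> 1 is
 * the group number of $n, e.g. $1 is read with type == (1 << 1). */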
static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

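/* Rebuild a cref chain with old_klass replaced by new_klass.  Nodes up to
 * and including the match are duplicated (the original chain stays
 * untouched) and the copied prefix keeps pointing at the shared tail. */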
rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

#define ADD_NEW_CREF(new_cref) \
    if (new_cref_tail) { \
        RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
    } \
    else { \
        new_cref_head = new_cref; \
    } \
    new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

#undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}

static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

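/* vm_getivar() is the shape-based inline cache for ivar reads: when the
 * receiver's shape_id matches the cached one, the cached attr_index points
 * straight into the fields array; otherwise the shape tree is consulted
 * and the inline cache (IC, or call cache for attr_reader) is refilled. */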
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    VALUE val = Qundef;
    VALUE *ivar_list;

    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_FIELDS(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            if (!fields_obj) {
                return default_value;
            }
            ivar_list = rb_imemo_fields_ptr(fields_obj);
            shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);

            break;
        }
      default:
        if (rb_obj_exivar_p(obj)) {
            VALUE fields_obj = 0;
            if (!rb_gen_fields_tbl_get(obj, id, &fields_obj)) {
                return default_value;
            }
            ivar_list = rb_imemo_fields_ptr(fields_obj);
        }
        else {
            return default_value;
        }
    }

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (rb_shape_too_complex_p(shape_id)) {
            st_table *table = NULL;
            switch (BUILTIN_TYPE(obj)) {
              case T_CLASS:
              case T_MODULE:
                table = rb_imemo_fields_complex_tbl(fields_obj);
                break;

              case T_OBJECT:
                table = ROBJECT_FIELDS_HASH(obj);
                break;

              default: {
                VALUE fields_obj;
                if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
                    table = rb_imemo_fields_complex_tbl(fields_obj);
                }
                break;
              }
            }

            if (!table || !st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // Refill the inline cache with the newly resolved shape/index.
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    RB_GC_GUARD(fields_obj);
    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

        if (!rb_shape_too_complex_p(next_shape_id)) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    VALUE fields_obj = 0;

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    rb_gen_fields_tbl_get(obj, 0, &fields_obj);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
    }

    RB_OBJ_WRITE(obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

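/* Fast-path ivar write.  A cache hit means the receiver already has the
 * cached shape and the slot is simply overwritten; a one-step transition
 * to dest_shape_id is also accepted when it adds exactly `id` without
 * growing capacity.  Anything else returns Qundef and the caller falls
 * back to the slow path. */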
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}

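/* Class variable accesses are cached per call site (ICVARC).  An entry is
 * trusted only while the global cvar state is unchanged, the cref matches,
 * and we are on the main ractor; otherwise update_classvariable_cache()
 * below re-resolves the variable and refreshes the entry. */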
static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

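/* throw_state packs the tag (TAG_BREAK, TAG_RETURN, ...) into
 * VM_THROW_STATE_MASK plus a VM_THROW_NO_ESCAPE_FLAG bit.  For a fresh
 * throw, vm_throw_start() locates the control frame to escape to and wraps
 * it in a THROW_DATA imemo, which the catch tables later match against. */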
static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i = 0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

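/* vm_expandarray() implements destructuring assignment: flag bit 0x01
 * requests a splat, bit 0x02 selects "post" order (elements pushed from
 * the tail).  Roughly, `a, b = ary` compiles to num=2/flag=0, `a, *b =
 * ary` to num=1/flag=0x01, and `*a, b = ary` to num=1/flag=0x03. */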
static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}

static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);

static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);

static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    int initial_capa = 2;
    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
#if VM_CHECK_MODE > 0
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = initial_capa;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);

    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
    return ccs;
}

static void
vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        RUBY_ASSERT(ccs->capa > 0);
        ccs->capa *= 2;
        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
#if VM_CHECK_MODE > 0
        ccs->debug_sig = ~(VALUE)ccs;
#endif
        // GC?
        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}

#if VM_CHECK_MODE > 0
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i = 0; i < ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}

static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i = 0; i < ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
#endif

const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);

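/* Call-cache (CC) tables map method ids to rb_class_cc_entries.  With
 * multiple ractors the table is treated as copy-on-write: eviction and
 * population dup the table and publish it with an atomic write, and
 * vm_lookup_cc() retries (see its `retry` label) when it loses a race. */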
2063static void
2064vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2065{
2066 ASSERT_vm_locking();
2067
2068 if (rb_multi_ractor_p()) {
2069 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2070 // Another ractor updated the CC table while we were waiting on the VM lock.
2071 // We have to retry.
2072 return;
2073 }
2074
2075 struct rb_class_cc_entries *ccs = NULL;
2076 rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs);
2077
2078 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2079 // Another ractor replaced that entry while we were waiting on the VM lock.
2080 return;
2081 }
2082
2083 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2084 rb_vm_cc_table_delete(new_table, mid);
2085 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2086 }
2087 else {
2088 rb_vm_cc_table_delete(cc_tbl, mid);
2089 }
2090}
2091
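// Slow path, called with the VM lock held: resolve the callable method
// entry for (klass, mid), create or reuse the per-class CCS, and register a
// fresh call cache in it. Under multi-ractor execution the CC table is
// duplicated and swapped in atomically (copy-on-write) instead of being
// mutated in place.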
2092static const struct rb_callcache *
2093vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2094{
2095 ASSERT_vm_locking();
2096
2097 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2098 const VALUE original_cc_table = cc_tbl;
2099 struct rb_class_cc_entries *ccs = NULL;
2100
2101 if (!cc_tbl) {
2102 cc_tbl = rb_vm_cc_table_create(1);
2103 }
2104 else if (rb_multi_ractor_p()) {
2105 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2106 }
2107
2108 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2109
2110 const rb_callable_method_entry_t *cme;
2111
2112 if (ccs) {
2113 cme = ccs->cme;
2114 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2115
2116 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2117 }
2118 else {
2119 cme = rb_callable_method_entry(klass, mid);
2120 }
2121
2122 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2123
2124 if (cme == NULL) {
2125 // undef or not found: can't cache the information
2126 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2127 return &vm_empty_cc;
2128 }
2129
2130 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2131
2132 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2133
2134 if (ccs == NULL) {
2135 VM_ASSERT(cc_tbl);
2136
2137 if (!LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs))) {
2138 // TODO: required?
2139 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2140 }
2141 }
2142
2143 cme = rb_check_overloaded_cme(cme, ci);
2144
2145 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2146 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2147
2148 VM_ASSERT(vm_cc_cme(cc) != NULL);
2149 VM_ASSERT(cme->called_id == mid);
2150 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2151
2152 if (original_cc_table != cc_tbl) {
2153 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2154 }
2155
2156 return cc;
2157}
2158
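// Lock-free lookup of a call cache for (klass, mid, ci): read the CC table
// with an atomic load and scan the CCS entries for a matching argc/flag
// pair. If the cached method entry was invalidated, evict the stale CCS
// under the VM lock and retry.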
2159static const struct rb_callcache *
2160vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2161{
2162 VALUE cc_tbl;
2163 struct rb_class_cc_entries *ccs;
2164retry:
2165 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2166 ccs = NULL;
2167
2168 if (cc_tbl) {
2169 // CCS data is keyed on method id, so we don't need the method id
2170 // for doing comparisons in the `for` loop below.
2171
2172 if (rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
2173 const int ccs_len = ccs->len;
2174
2175 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2176 RB_VM_LOCKING() {
2177 vm_evict_cc(klass, cc_tbl, mid);
2178 }
2179 goto retry;
2180 }
2181 else {
2182 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2183
2184 // We already know the method id is correct because we had
2185 // to look up the ccs_data by method id. All we need to
2186 // compare is argc and flag
2187 unsigned int argc = vm_ci_argc(ci);
2188 unsigned int flag = vm_ci_flag(ci);
2189
2190 for (int i=0; i<ccs_len; i++) {
2191 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2192 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2193 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2194
2195 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2196
2197 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2198 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2199
2200 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2201 VM_ASSERT(ccs_cc->klass == klass);
2202 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2203
2204 return ccs_cc;
2205 }
2206 }
2207 }
2208 }
2209 }
2210
2211 RB_GC_GUARD(cc_tbl);
2212 return NULL;
2213}
2214
2215static const struct rb_callcache *
2216vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2217{
2218 const ID mid = vm_ci_mid(ci);
2219
2220 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2221 if (cc) {
2222 return cc;
2223 }
2224
2225 RB_VM_LOCKING() {
2226 if (rb_multi_ractor_p()) {
2227 // The CC may have been populated by another ractor while we were waiting on the lock,
2228 // so we must look up a second time.
2229 cc = vm_lookup_cc(klass, ci, mid);
2230 }
2231
2232 if (!cc) {
2233 cc = vm_populate_cc(klass, ci, mid);
2234 }
2235 }
2236
2237 return cc;
2238}
2239
2240const struct rb_callcache *
2241rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2242{
2243 const struct rb_callcache *cc;
2244
2245 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2246
2247 cc = vm_search_cc(klass, ci);
2248
2249 VM_ASSERT(cc);
2250 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2251 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2252 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2253 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2254 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2255
2256 return cc;
2257}
2258
2259static const struct rb_callcache *
2260vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2261{
2262#if USE_DEBUG_COUNTER
2263 const struct rb_callcache *old_cc = cd->cc;
2264#endif
2265
2266 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2267
2268#if OPT_INLINE_METHOD_CACHE
2269 cd->cc = cc;
2270
2271 const struct rb_callcache *empty_cc = &vm_empty_cc;
2272 if (cd_owner && cc != empty_cc) {
2273 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2274 }
2275
2276#if USE_DEBUG_COUNTER
2277 if (!old_cc || old_cc == empty_cc) {
2278 // empty
2279 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2280 }
2281 else if (old_cc == cc) {
2282 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2283 }
2284 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2285 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2286 }
2287 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2288 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2289 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2290 }
2291 else {
2292 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2293 }
2294#endif
2295#endif // OPT_INLINE_METHOD_CACHE
2296
2297 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2298 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2299
2300 return cc;
2301}
2302
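// Per-call-site inline cache check: the cached cc hits when its class
// matches the receiver's class and its method entry has not been
// invalidated; on a miss, fall back to the global cache via the slow path.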
2303ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2304static const struct rb_callcache *
2305vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2306{
2307 const struct rb_callcache *cc = cd->cc;
2308
2309#if OPT_INLINE_METHOD_CACHE
2310 if (LIKELY(vm_cc_class_check(cc, klass))) {
2311 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2312 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2313 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2314 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2315 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2316 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2317
2318 return cc;
2319 }
2320 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2321 }
2322 else {
2323 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2324 }
2325#endif
2326
2327 return vm_search_method_slowpath0(cd_owner, cd, klass);
2328}
2329
2330static const struct rb_callcache *
2331vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2332{
2333 VALUE klass = CLASS_OF(recv);
2334 VM_ASSERT(klass != Qfalse);
2335 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2336
2337 return vm_search_method_fastpath(cd_owner, cd, klass);
2338}
2339
2340#if __has_attribute(transparent_union)
2341typedef union {
2342 VALUE (*anyargs)(ANYARGS);
2343 VALUE (*f00)(VALUE);
2344 VALUE (*f01)(VALUE, VALUE);
2345 VALUE (*f02)(VALUE, VALUE, VALUE);
2346 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2347 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2348 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2349 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2350 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2351 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2352 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2353 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2354 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2355 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2356 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2357 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2358 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2359 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2360} __attribute__((__transparent_union__)) cfunc_type;
2361# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2362#else
2363typedef VALUE (*cfunc_type)(ANYARGS);
2364# define make_cfunc_type(f) (cfunc_type)(f)
2365#endif
2366
2367static inline int
2368check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2369{
2370 if (! me) {
2371 return false;
2372 }
2373 else {
2374 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2375 VM_ASSERT(callable_method_entry_p(me));
2376 VM_ASSERT(me->def);
2377 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2378 return false;
2379 }
2380 else {
2381#if __has_attribute(transparent_union)
2382 return me->def->body.cfunc.func == func.anyargs;
2383#else
2384 return me->def->body.cfunc.func == func;
2385#endif
2386 }
2387 }
2388}
2389
2390static inline int
2391check_method_basic_definition(const rb_callable_method_entry_t *me)
2392{
2393 return me && METHOD_ENTRY_BASIC(me);
2394}
2395
2396static inline int
2397vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2398{
2399 VM_ASSERT(iseq != NULL);
2400 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2401 return check_cfunc(vm_cc_cme(cc), func);
2402}
2403
2404#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2405#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2406
2407#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2408
2409static inline bool
2410FIXNUM_2_P(VALUE a, VALUE b)
2411{
2412 /* FIXNUM_P(a) && FIXNUM_P(b)
2413 * == ((a & 1) && (b & 1))
2414 * == a & b & 1 */
2415 SIGNED_VALUE x = a;
2416 SIGNED_VALUE y = b;
2417 SIGNED_VALUE z = x & y & 1;
2418 return z == 1;
2419}
2420
2421static inline bool
2422FLONUM_2_P(VALUE a, VALUE b)
2423{
2424#if USE_FLONUM
2425 /* FLONUM_P(a) && FLONUM_P(b)
2426 * == ((a & 3) == 2) && ((b & 3) == 2)
2427 * == !(((a ^ 2) | (b ^ 2)) & 3)
2428 */
2429 SIGNED_VALUE x = a;
2430 SIGNED_VALUE y = b;
2431 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2432 return !z;
2433#else
2434 return false;
2435#endif
2436}
2437
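// Specialized `==`: Fixnums, Flonums, and static Symbols compare by
// identity, Floats and Strings by their built-in equality, as long as
// BOP_EQ has not been redefined for the class. Returns Qundef when no
// specialized path applies, so the caller falls back to method dispatch.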
2438static VALUE
2439opt_equality_specialized(VALUE recv, VALUE obj)
2440{
2441 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2442 goto compare_by_identity;
2443 }
2444 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2445 goto compare_by_identity;
2446 }
2447 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2448 goto compare_by_identity;
2449 }
2450 else if (SPECIAL_CONST_P(recv)) {
2451 // other special constants: no specialized path, fall through to Qundef
2452 }
2453 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2454 double a = RFLOAT_VALUE(recv);
2455 double b = RFLOAT_VALUE(obj);
2456
2457#if MSC_VERSION_BEFORE(1300)
2458 if (isnan(a)) {
2459 return Qfalse;
2460 }
2461 else if (isnan(b)) {
2462 return Qfalse;
2463 }
2464 else
2465#endif
2466 return RBOOL(a == b);
2467 }
2468 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2469 if (recv == obj) {
2470 return Qtrue;
2471 }
2472 else if (RB_TYPE_P(obj, T_STRING)) {
2473 return rb_str_eql_internal(obj, recv);
2474 }
2475 }
2476 return Qundef;
2477
2478 compare_by_identity:
2479 return RBOOL(recv == obj);
2480}
2481
2482static VALUE
2483opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2484{
2485 VM_ASSERT(cd_owner != NULL);
2486
2487 VALUE val = opt_equality_specialized(recv, obj);
2488 if (!UNDEF_P(val)) return val;
2489
2490 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2491 return Qundef;
2492 }
2493 else {
2494 return RBOOL(recv == obj);
2495 }
2496}
2497
2498#undef EQ_UNREDEFINED_P
2499
2500static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2501NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2502
2503static VALUE
2504opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2505{
2506 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2507
2508 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2509 return RBOOL(recv == obj);
2510 }
2511 else {
2512 return Qundef;
2513 }
2514}
2515
2516static VALUE
2517opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2518{
2519 VALUE val = opt_equality_specialized(recv, obj);
2520 if (!UNDEF_P(val)) {
2521 return val;
2522 }
2523 else {
2524 return opt_equality_by_mid_slowpath(recv, obj, mid);
2525 }
2526}
2527
2528VALUE
2529rb_equal_opt(VALUE obj1, VALUE obj2)
2530{
2531 return opt_equality_by_mid(obj1, obj2, idEq);
2532}
2533
2534VALUE
2535rb_eql_opt(VALUE obj1, VALUE obj2)
2536{
2537 return opt_equality_by_mid(obj1, obj2, idEqlP);
2538}
2539
2540extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2541extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2542
2543static VALUE
2544check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2545{
2546 switch (type) {
2547 case VM_CHECKMATCH_TYPE_WHEN:
2548 return pattern;
2549 case VM_CHECKMATCH_TYPE_RESCUE:
2550 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2551 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2552 }
2553 /* fall through */
2554 case VM_CHECKMATCH_TYPE_CASE: {
2555 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2556 }
2557 default:
2558 rb_bug("check_match: unreachable");
2559 }
2560}
2561
2562
2563#if MSC_VERSION_BEFORE(1300)
2564#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2565#else
2566#define CHECK_CMP_NAN(a, b) /* do nothing */
2567#endif
2568
2569static inline VALUE
2570double_cmp_lt(double a, double b)
2571{
2572 CHECK_CMP_NAN(a, b);
2573 return RBOOL(a < b);
2574}
2575
2576static inline VALUE
2577double_cmp_le(double a, double b)
2578{
2579 CHECK_CMP_NAN(a, b);
2580 return RBOOL(a <= b);
2581}
2582
2583static inline VALUE
2584double_cmp_gt(double a, double b)
2585{
2586 CHECK_CMP_NAN(a, b);
2587 return RBOOL(a > b);
2588}
2589
2590static inline VALUE
2591double_cmp_ge(double a, double b)
2592{
2593 CHECK_CMP_NAN(a, b);
2594 return RBOOL(a >= b);
2595}
2596
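// Compute the expected base pointer for a Ruby frame: the previous frame's
// sp plus this frame's local table and env data (plus any forwarded
// arguments, and one slot for `self` in method frames). Returns NULL for
// non-Ruby frames.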
2597 // A copy of this function exists in vm_dump.c.
2598static inline VALUE *
2599vm_base_ptr(const rb_control_frame_t *cfp)
2600{
2601 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2602
2603 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2604 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2605
2606 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2607 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2608 int params = ISEQ_BODY(cfp->iseq)->param.size;
2609
2610 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2611 bp += vm_ci_argc(ci);
2612 }
2613
2614 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2615 /* adjust `self' */
2616 bp += 1;
2617 }
2618#if VM_DEBUG_BP_CHECK
2619 if (bp != cfp->bp_check) {
2620 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2621 (long)(cfp->bp_check - GET_EC()->vm_stack),
2622 (long)(bp - GET_EC()->vm_stack));
2623 rb_bug("vm_base_ptr: unreachable");
2624 }
2625#endif
2626 return bp;
2627 }
2628 else {
2629 return NULL;
2630 }
2631}
2632
2633VALUE *
2634rb_vm_base_ptr(const rb_control_frame_t *cfp)
2635{
2636 return vm_base_ptr(cfp);
2637}
2638
2639/* method call processes with call_info */
2640
2641#include "vm_args.c"
2642
2643static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2644ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2645static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2646static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2647static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2648static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2649static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2650
2651static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2652
2653static VALUE
2654vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2655{
2656 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2657
2658 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2659}
2660
2661static VALUE
2662vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2663{
2664 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2665
2666 const struct rb_callcache *cc = calling->cc;
2667 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2668 int param = ISEQ_BODY(iseq)->param.size;
2669 int local = ISEQ_BODY(iseq)->local_table_size;
2670 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2671}
2672
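// An iseq is "simple" when it takes only mandatory positional parameters:
// no optional, rest, post, keyword, or block parameters and no `...`
// forwarding.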
2673bool
2674rb_simple_iseq_p(const rb_iseq_t *iseq)
2675{
2676 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2677 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2678 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2679 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2680 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2681 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2682 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2683 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2684}
2685
2686bool
2687rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2688{
2689 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2690 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2691 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2692 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2693 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2694 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2695 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2696 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2697}
2698
2699bool
2700rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2701{
2702 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2703 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2704 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2705 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2706 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2707 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2708 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2709}
2710
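// Sentinel values for the max_args parameter of vm_caller_setup_arg_splat()
// and CALLER_SETUP_ARG(). Both permit spilling an oversized splat into a
// hidden heap array (calling->heap_argv) instead of the VM stack; the
// KEEP_KWSPLAT variant additionally keeps calling->kw_splat set when the
// keyword hash is moved into that array.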
2711#define ALLOW_HEAP_ARGV (-2)
2712#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2713
2714static inline bool
2715vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2716{
2717 vm_check_canary(GET_EC(), cfp->sp);
2718 bool ret = false;
2719
2720 if (!NIL_P(ary)) {
2721 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2722 long len = RARRAY_LEN(ary);
2723 int argc = calling->argc;
2724
2725 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2726 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2727 * a temporary array, instead of trying to keep arguments on the VM stack.
2728 */
2729 VALUE *argv = cfp->sp - argc;
2730 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2731 rb_ary_cat(argv_ary, argv, argc);
2732 rb_ary_cat(argv_ary, ptr, len);
2733 cfp->sp -= argc - 1;
2734 cfp->sp[-1] = argv_ary;
2735 calling->argc = 1;
2736 calling->heap_argv = argv_ary;
2737 RB_GC_GUARD(ary);
2738 }
2739 else {
2740 long i;
2741
2742 if (max_args >= 0 && len + argc > max_args) {
2743 /* If only max_args arguments are allowed, copy up to max_args.
2744 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2745 * where additional arguments are ignored.
2746 *
2747 * Also, copy up to one more argument than the maximum,
2748 * in case it is an empty keyword hash that will be removed.
2749 */
2750 calling->argc += len - (max_args - argc + 1);
2751 len = max_args - argc + 1;
2752 ret = true;
2753 }
2754 else {
2755 /* Unset heap_argv if it was set originally. This can happen when
2756 * forwarding modified arguments, where heap_argv was used for the
2757 * original call but is not supported by the forwarded method in
2758 * all cases.
2759 */
2760 calling->heap_argv = 0;
2761 }
2762 CHECK_VM_STACK_OVERFLOW(cfp, len);
2763
2764 for (i = 0; i < len; i++) {
2765 *cfp->sp++ = ptr[i];
2766 }
2767 calling->argc += i;
2768 }
2769 }
2770
2771 return ret;
2772}
2773
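// Collect the trailing VM_CALL_KWARG arguments from the stack into a newly
// allocated Hash, leave that Hash as the single last argument, and mark the
// call as kw_splat.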
2774static inline void
2775vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2776{
2777 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2778 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2779 const VALUE h = rb_hash_new_with_size(kw_len);
2780 VALUE *sp = cfp->sp;
2781 int i;
2782
2783 for (i=0; i<kw_len; i++) {
2784 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2785 }
2786 (sp-kw_len)[0] = h;
2787
2788 cfp->sp -= kw_len - 1;
2789 calling->argc -= kw_len - 1;
2790 calling->kw_splat = 1;
2791}
2792
2793static inline VALUE
2794vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2795{
2796 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2797 if (keyword_hash != Qnil) {
2798 /* Convert a non-hash keyword splat to a new hash */
2799 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2800 }
2801 }
2802 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2803 /* Convert a hash keyword splat to a new hash unless
2804 * a mutable keyword splat was passed.
2805 * Skip allocating new hash for empty keyword splat, as empty
2806 * keyword splat will be ignored by both callers.
2807 */
2808 keyword_hash = rb_hash_dup(keyword_hash);
2809 }
2810 return keyword_hash;
2811}
2812
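// Normalize splat (*a), keyword splat (**kw), and literal keyword arguments
// on the caller's stack into plain positional arguments plus at most one
// trailing keyword Hash, keeping calling->argc and calling->kw_splat in
// sync.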
2813static inline void
2814CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2815 struct rb_calling_info *restrict calling,
2816 const struct rb_callinfo *restrict ci, int max_args)
2817{
2818 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2819 if (IS_ARGS_KW_SPLAT(ci)) {
2820 // f(*a, **kw)
2821 VM_ASSERT(calling->kw_splat == 1);
2822
2823 cfp->sp -= 2;
2824 calling->argc -= 2;
2825 VALUE ary = cfp->sp[0];
2826 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2827
2828 // splat a
2829 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2830
2831 // put kw
2832 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2833 if (UNLIKELY(calling->heap_argv)) {
2834 rb_ary_push(calling->heap_argv, kwh);
2835 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2836 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2837 calling->kw_splat = 0;
2838 }
2839 }
2840 else {
2841 cfp->sp[0] = kwh;
2842 cfp->sp++;
2843 calling->argc++;
2844
2845 VM_ASSERT(calling->kw_splat == 1);
2846 }
2847 }
2848 else {
2849 calling->kw_splat = 0;
2850 }
2851 }
2852 else {
2853 // f(*a)
2854 VM_ASSERT(calling->kw_splat == 0);
2855
2856 cfp->sp -= 1;
2857 calling->argc -= 1;
2858 VALUE ary = cfp->sp[0];
2859
2860 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2861 goto check_keyword;
2862 }
2863
2864 // check the last argument
2865 VALUE last_hash, argv_ary;
2866 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2867 if (!IS_ARGS_KEYWORD(ci) &&
2868 RARRAY_LEN(argv_ary) > 0 &&
2869 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2870 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2871
2872 rb_ary_pop(argv_ary);
2873 if (!RHASH_EMPTY_P(last_hash)) {
2874 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2875 calling->kw_splat = 1;
2876 }
2877 }
2878 }
2879 else {
2880check_keyword:
2881 if (!IS_ARGS_KEYWORD(ci) &&
2882 calling->argc > 0 &&
2883 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2884 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2885
2886 if (RHASH_EMPTY_P(last_hash)) {
2887 calling->argc--;
2888 cfp->sp -= 1;
2889 }
2890 else {
2891 cfp->sp[-1] = rb_hash_dup(last_hash);
2892 calling->kw_splat = 1;
2893 }
2894 }
2895 }
2896 }
2897 }
2898 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2899 // f(**kw)
2900 VM_ASSERT(calling->kw_splat == 1);
2901 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2902
2903 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2904 cfp->sp--;
2905 calling->argc--;
2906 calling->kw_splat = 0;
2907 }
2908 else {
2909 cfp->sp[-1] = kwh;
2910 }
2911 }
2912 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2913 // f(k1:1, k2:2)
2914 VM_ASSERT(calling->kw_splat == 0);
2915
2916 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2917 * by creating a keyword hash.
2918 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2919 */
2920 vm_caller_setup_arg_kw(cfp, calling, ci);
2921 }
2922}
2923
2924#define USE_OPT_HIST 0
2925
2926#if USE_OPT_HIST
2927#define OPT_HIST_MAX 64
2928static int opt_hist[OPT_HIST_MAX+1];
2929
2930__attribute__((destructor))
2931static void
2932opt_hist_show_results_at_exit(void)
2933{
2934 for (int i=0; i<OPT_HIST_MAX; i++) {
2935 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2936 }
2937}
2938#endif
2939
2940static VALUE
2941vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2942 struct rb_calling_info *calling)
2943{
2944 const struct rb_callcache *cc = calling->cc;
2945 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2946 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2947 const int opt = calling->argc - lead_num;
2948 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2949 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2950 const int param = ISEQ_BODY(iseq)->param.size;
2951 const int local = ISEQ_BODY(iseq)->local_table_size;
2952 const int delta = opt_num - opt;
2953
2954 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2955
2956#if USE_OPT_HIST
2957 if (opt_pc < OPT_HIST_MAX) {
2958 opt_hist[opt]++;
2959 }
2960 else {
2961 opt_hist[OPT_HIST_MAX]++;
2962 }
2963#endif
2964
2965 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2966}
2967
2968static VALUE
2969vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2970 struct rb_calling_info *calling)
2971{
2972 const struct rb_callcache *cc = calling->cc;
2973 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2974 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2975 const int opt = calling->argc - lead_num;
2976 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2977
2978 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2979
2980#if USE_OPT_HIST
2981 if (opt_pc < OPT_HIST_MAX) {
2982 opt_hist[opt]++;
2983 }
2984 else {
2985 opt_hist[OPT_HIST_MAX]++;
2986 }
2987#endif
2988
2989 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2990}
2991
2992static void
2993args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
2994 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2995 VALUE *const locals);
2996
2997static VALUE
2998vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2999 struct rb_calling_info *calling)
3000{
3001 const struct rb_callcache *cc = calling->cc;
3002 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3003 int param_size = ISEQ_BODY(iseq)->param.size;
3004 int local_size = ISEQ_BODY(iseq)->local_table_size;
3005
3006 // Setting up local size and param size
3007 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3008
3009 local_size = local_size + vm_ci_argc(calling->cd->ci);
3010 param_size = param_size + vm_ci_argc(calling->cd->ci);
3011
3012 cfp->sp[0] = (VALUE)calling->cd->ci;
3013
3014 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
3015}
3016
3017static VALUE
3018vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3019 struct rb_calling_info *calling)
3020{
3021 const struct rb_callinfo *ci = calling->cd->ci;
3022 const struct rb_callcache *cc = calling->cc;
3023
3024 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
3025 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
3026
3027 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3028 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3029 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3030 const int ci_kw_len = kw_arg->keyword_len;
3031 const VALUE * const ci_keywords = kw_arg->keywords;
3032 VALUE *argv = cfp->sp - calling->argc;
3033 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3034 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3035 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3036 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3037 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3038
3039 int param = ISEQ_BODY(iseq)->param.size;
3040 int local = ISEQ_BODY(iseq)->local_table_size;
3041 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3042}
3043
3044static VALUE
3045vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3046 struct rb_calling_info *calling)
3047{
3048 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3049 const struct rb_callcache *cc = calling->cc;
3050
3051 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3052 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3053
3054 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3055 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3056 VALUE * const argv = cfp->sp - calling->argc;
3057 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3058
3059 int i;
3060 for (i=0; i<kw_param->num; i++) {
3061 klocals[i] = kw_param->default_values[i];
3062 }
3063 klocals[i] = INT2FIX(0); // kw specify flag
3064 // NOTE:
3065 // nobody checks this value, but it should be cleared because it can
3066 // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
3067
3068 int param = ISEQ_BODY(iseq)->param.size;
3069 int local = ISEQ_BODY(iseq)->local_table_size;
3070 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3071}
3072
3073static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3074
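// Fast path for methods whose body is a single no-argument leaf builtin
// (BUILTIN_ATTR_SINGLE_NOARG_LEAF): drop the receiver and arguments from
// the stack and invoke the builtin function directly, without pushing a
// Ruby frame.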
3075static VALUE
3076vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3077 struct rb_calling_info *calling)
3078{
3079 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3080 cfp->sp -= (calling->argc + 1);
3081 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3082 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3083}
3084
3085VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3086
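// Warn when a block is passed to a method whose iseq never uses one. For
// example, with Warning[:strict_unused_block] enabled (or in verbose mode):
//
//   def foo; end
//   foo { 1 }  # warning: the block passed to 'foo' ... may be ignored
//
// A set table keyed on the method (and, in strict mode, on the call site's
// pc mixed with cme->def) suppresses duplicate warnings.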
3087static void
3088warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3089{
3090 rb_vm_t *vm = GET_VM();
3091 set_table *dup_check_table = vm->unused_block_warning_table;
3092 st_data_t key;
3093 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3094
3095 union {
3096 VALUE v;
3097 unsigned char b[SIZEOF_VALUE];
3098 } k1 = {
3099 .v = (VALUE)pc,
3100 }, k2 = {
3101 .v = (VALUE)cme->def,
3102 };
3103
3104 // relax check
3105 if (!strict_unused_block) {
3106 key = (st_data_t)cme->def->original_id;
3107
3108 if (set_table_lookup(dup_check_table, key)) {
3109 return;
3110 }
3111 }
3112
3113 // strict check
3114 // make unique key from pc and me->def pointer
3115 key = 0;
3116 for (int i=0; i<SIZEOF_VALUE; i++) {
3117 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3118 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3119 }
3120
3121 if (0) {
3122 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3123 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3124 fprintf(stderr, "key:%p\n", (void *)key);
3125 }
3126
3127 // duplication check
3128 if (set_insert(dup_check_table, key)) {
3129 // already shown
3130 }
3131 else if (RTEST(ruby_verbose) || strict_unused_block) {
3132 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3133 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3134
3135 if (!NIL_P(m_loc)) {
3136 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3137 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3138 }
3139 else {
3140 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3141 }
3142 }
3143}
3144
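// Lay out the callee's arguments on the stack for an iseq method call and
// install the fastest dispatch handler the call shape allows. Returns the
// pc offset (from opt_table) at which execution should start; 0 when no
// optional parameters are skipped.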
3145static inline int
3146vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3147 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3148{
3149 const struct rb_callinfo *ci = calling->cd->ci;
3150 const struct rb_callcache *cc = calling->cc;
3151
3152 VM_ASSERT((vm_ci_argc(ci), 1));
3153 VM_ASSERT(vm_cc_cme(cc) != NULL);
3154
3155 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3156 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3157 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3158 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3159 }
3160
3161 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3162 if (LIKELY(rb_simple_iseq_p(iseq))) {
3163 rb_control_frame_t *cfp = ec->cfp;
3164 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3165 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3166
3167 if (calling->argc != lead_num) {
3168 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3169 }
3170
3171 //VM_ASSERT(ci == calling->cd->ci);
3172 VM_ASSERT(cc == calling->cc);
3173
3174 if (vm_call_iseq_optimizable_p(ci, cc)) {
3175 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3176 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3177 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3178 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3179 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3180 }
3181 else {
3182 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3183 }
3184 }
3185 return 0;
3186 }
3187 else if (rb_iseq_only_optparam_p(iseq)) {
3188 rb_control_frame_t *cfp = ec->cfp;
3189
3190 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3191 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3192
3193 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3194 const int argc = calling->argc;
3195 const int opt = argc - lead_num;
3196
3197 if (opt < 0 || opt > opt_num) {
3198 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3199 }
3200
3201 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3202 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3203 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3204 vm_call_cacheable(ci, cc));
3205 }
3206 else {
3207 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3208 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3209 vm_call_cacheable(ci, cc));
3210 }
3211
3212 /* initialize opt vars for self-references */
3213 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3214 for (int i=argc; i<lead_num + opt_num; i++) {
3215 argv[i] = Qnil;
3216 }
3217 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3218 }
3219 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3220 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3221 const int argc = calling->argc;
3222 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3223
3224 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3225 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3226
3227 if (argc - kw_arg->keyword_len == lead_num) {
3228 const int ci_kw_len = kw_arg->keyword_len;
3229 const VALUE * const ci_keywords = kw_arg->keywords;
3230 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3231 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3232
3233 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3234 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3235
3236 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3237 vm_call_cacheable(ci, cc));
3238
3239 return 0;
3240 }
3241 }
3242 else if (argc == lead_num) {
3243 /* no kwarg */
3244 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3245 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3246
3247 if (klocals[kw_param->num] == INT2FIX(0)) {
3248 /* copy from default_values */
3249 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3250 vm_call_cacheable(ci, cc));
3251 }
3252
3253 return 0;
3254 }
3255 }
3256 }
3257
3258 // Called iseq is using ... param
3259 // def foo(...) # <- iseq for foo will have "forwardable"
3260 //
3261 // We want to set the `...` local to the caller's CI
3262 // foo(1, 2) # <- the ci for this should end up as `...`
3263 //
3264 // So hopefully the stack looks like:
3265 //
3266 // => 1
3267 // => 2
3268 // => *
3269 // => **
3270 // => &
3271 // => ... # <- points at `foo`s CI
3272 // => cref_or_me
3273 // => specval
3274 // => type
3275 //
3276 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3277 bool can_fastpath = true;
3278
3279 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3280 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3281 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3282 ci = vm_ci_new_runtime(
3283 vm_ci_mid(ci),
3284 vm_ci_flag(ci),
3285 vm_ci_argc(ci),
3286 vm_ci_kwarg(ci));
3287 }
3288 else {
3289 ci = forward_cd->caller_ci;
3290 }
3291 can_fastpath = false;
3292 }
3293 // C functions calling iseqs will stack allocate a CI,
3294 // so we need to convert it to a heap-allocated one.
3295 if (!vm_ci_markable(ci)) {
3296 ci = vm_ci_new_runtime(
3297 vm_ci_mid(ci),
3298 vm_ci_flag(ci),
3299 vm_ci_argc(ci),
3300 vm_ci_kwarg(ci));
3301 can_fastpath = false;
3302 }
3303 argv[param_size - 1] = (VALUE)ci;
3304 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3305 return 0;
3306 }
3307
3308 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3309}
3310
3311static void
3312vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3313{
3314 // This case is when the caller is using a ... parameter.
3315 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3316 // In this case the caller's caller's CI will be on the stack.
3317 //
3318 // For example:
3319 //
3320 // def bar(a, b); a + b; end
3321 // def foo(...); bar(...); end
3322 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3323 //
3324 // Stack layout will be:
3325 //
3326 // > 1
3327 // > 2
3328 // > CI for foo(1, 2)
3329 // > cref_or_me
3330 // > specval
3331 // > type
3332 // > receiver
3333 // > CI for foo(1, 2), via `getlocal ...`
3334 // > ( SP points here )
3335 const VALUE * lep = VM_CF_LEP(cfp);
3336
3337 const rb_iseq_t *iseq;
3338
3339 // If we're in an escaped environment (lambda for example), get the iseq
3340 // from the captured env.
3341 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3342 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3343 iseq = env->iseq;
3344 }
3345 else { // Otherwise use the lep to find the caller
3346 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3347 }
3348
3349 // Our local storage is below the args we need to copy
3350 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3351
3352 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3353 VALUE * to = cfp->sp - 1; // clobber the CI
3354
3355 if (RTEST(splat)) {
3356 to -= 1; // clobber the splat array
3357 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3358 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3359 to += RARRAY_LEN(splat);
3360 }
3361
3362 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3363 MEMCPY(to, from, VALUE, argc);
3364 cfp->sp = to + argc;
3365
3366 // Stack layout should now be:
3367 //
3368 // > 1
3369 // > 2
3370 // > CI for foo(1, 2)
3371 // > cref_or_me
3372 // > specval
3373 // > type
3374 // > receiver
3375 // > 1
3376 // > 2
3377 // > ( SP points here )
3378}
3379
3380static VALUE
3381vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3382{
3383 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3384
3385 const struct rb_callcache *cc = calling->cc;
3386 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3387 int param_size = ISEQ_BODY(iseq)->param.size;
3388 int local_size = ISEQ_BODY(iseq)->local_table_size;
3389
3390 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3391
3392 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3393 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3394}
3395
3396static VALUE
3397vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3398{
3399 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3400
3401 const struct rb_callcache *cc = calling->cc;
3402 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3403 int param_size = ISEQ_BODY(iseq)->param.size;
3404 int local_size = ISEQ_BODY(iseq)->local_table_size;
3405
3406 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3407
3408 // Setting up local size and param size
3409 local_size = local_size + vm_ci_argc(calling->cd->ci);
3410 param_size = param_size + vm_ci_argc(calling->cd->ci);
3411
3412 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3413 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3414}
3415
3416static inline VALUE
3417vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3418 int opt_pc, int param_size, int local_size)
3419{
3420 const struct rb_callinfo *ci = calling->cd->ci;
3421 const struct rb_callcache *cc = calling->cc;
3422
3423 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3424 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3425 }
3426 else {
3427 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3428 }
3429}
3430
3431static inline VALUE
3432vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3433 int opt_pc, int param_size, int local_size)
3434{
3435 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3436 VALUE *argv = cfp->sp - calling->argc;
3437 VALUE *sp = argv + param_size;
3438 cfp->sp = argv - 1 /* recv */;
3439
3440 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3441 calling->block_handler, (VALUE)me,
3442 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3443 local_size - param_size,
3444 ISEQ_BODY(iseq)->stack_max);
3445 return Qundef;
3446}
3447
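// Tail call: pop the caller's frame first, then push the callee's frame in
// its place, copying the receiver and arguments down so that the VM stack
// does not grow across tail calls.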
3448static inline VALUE
3449vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3450{
3451 const struct rb_callcache *cc = calling->cc;
3452 unsigned int i;
3453 VALUE *argv = cfp->sp - calling->argc;
3454 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3455 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3456 VALUE *src_argv = argv;
3457 VALUE *sp_orig, *sp;
3458 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3459
3460 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3461 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3462 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3463 dst_captured->code.val = src_captured->code.val;
3464 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3465 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3466 }
3467 else {
3468 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3469 }
3470 }
3471
3472 vm_pop_frame(ec, cfp, cfp->ep);
3473 cfp = ec->cfp;
3474
3475 sp_orig = sp = cfp->sp;
3476
3477 /* push self */
3478 sp[0] = calling->recv;
3479 sp++;
3480
3481 /* copy arguments */
3482 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3483 *sp++ = src_argv[i];
3484 }
3485
3486 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3487 calling->recv, calling->block_handler, (VALUE)me,
3488 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3489 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3490 ISEQ_BODY(iseq)->stack_max);
3491
3492 cfp->sp = sp_orig;
3493
3494 return Qundef;
3495}
3496
3497static void
3498ractor_unsafe_check(void)
3499{
3500 if (!rb_ractor_main_p()) {
3501 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3502 }
3503}
3504
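// The call_cfunc_<arity> invokers bridge the VM's (recv, argc, argv)
// calling convention to C method functions of fixed arity; -2 passes the
// arguments as a single Array and -1 passes (argc, argv, recv). Each
// variant first checks that it is running on the main ractor.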
3505static VALUE
3506call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3507{
3508 ractor_unsafe_check();
3509 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3510 return (*f)(recv, rb_ary_new4(argc, argv));
3511}
3512
3513static VALUE
3514call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3515{
3516 ractor_unsafe_check();
3517 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3518 return (*f)(argc, argv, recv);
3519}
3520
3521static VALUE
3522call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3523{
3524 ractor_unsafe_check();
3525 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3526 return (*f)(recv);
3527}
3528
3529static VALUE
3530call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3531{
3532 ractor_unsafe_check();
3533 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3534 return (*f)(recv, argv[0]);
3535}
3536
3537static VALUE
3538call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3539{
3540 ractor_unsafe_check();
3541 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3542 return (*f)(recv, argv[0], argv[1]);
3543}
3544
3545static VALUE
3546call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3547{
3548 ractor_unsafe_check();
3549 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3550 return (*f)(recv, argv[0], argv[1], argv[2]);
3551}
3552
3553static VALUE
3554call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3555{
3556 ractor_unsafe_check();
3557 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3558 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3559}
3560
3561static VALUE
3562call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3563{
3564 ractor_unsafe_check();
3565 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3566 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3567}
3568
3569static VALUE
3570call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3571{
3572 ractor_unsafe_check();
3573 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3574 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3575}
3576
3577static VALUE
3578call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3579{
3580 ractor_unsafe_check();
3581 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3582 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3583}
3584
3585static VALUE
3586call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3587{
3588 ractor_unsafe_check();
3589 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3590 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3591}
3592
3593static VALUE
3594call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3595{
3596 ractor_unsafe_check();
3597 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3598 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3599}
3600
3601static VALUE
3602call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3603{
3604 ractor_unsafe_check();
3605 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3606 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3607}
3608
3609static VALUE
3610call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3611{
3612 ractor_unsafe_check();
3613 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3614 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3615}
3616
3617static VALUE
3618call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3619{
3620 ractor_unsafe_check();
3621 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3622 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3623}
3624
3625static VALUE
3626call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3627{
3628 ractor_unsafe_check();
3629 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3630 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3631}
3632
3633static VALUE
3634call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3635{
3636 ractor_unsafe_check();
3637 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3638 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3639}
3640
3641static VALUE
3642call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643{
3644 ractor_unsafe_check();
3645 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3646 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3647}
3648
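// The ractor_safe_call_cfunc_* invokers are identical to the call_cfunc_*
// family above except that they skip ractor_unsafe_check(), for methods
// that may run on any ractor.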
3649static VALUE
3650ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3651{
3652 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3653 return (*f)(recv, rb_ary_new4(argc, argv));
3654}
3655
3656static VALUE
3657ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3658{
3659 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3660 return (*f)(argc, argv, recv);
3661}
3662
3663static VALUE
3664ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3665{
3666 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3667 return (*f)(recv);
3668}
3669
3670static VALUE
3671ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3672{
3673 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3674 return (*f)(recv, argv[0]);
3675}
3676
3677static VALUE
3678ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3679{
3680 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3681 return (*f)(recv, argv[0], argv[1]);
3682}
3683
3684static VALUE
3685ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3686{
3687 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3688 return (*f)(recv, argv[0], argv[1], argv[2]);
3689}
3690
3691static VALUE
3692ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3693{
3694 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3695 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3696}
3697
3698static VALUE
3699ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3700{
3701 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3702 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3703}
3704
3705static VALUE
3706ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3707{
3708 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3709 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3710}
3711
3712static VALUE
3713ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3714{
3715 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3716 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3717}
3718
3719static VALUE
3720ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3721{
3722 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3723 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3724}
3725
3726static VALUE
3727ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3728{
3729 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3730 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3731}
3732
3733static VALUE
3734ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3735{
3736 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3737 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3738}
3739
3740static VALUE
3741ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3742{
3743 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3744 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3745}
3746
3747static VALUE
3748ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3749{
3750 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3751 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3752}
3753
3754static VALUE
3755ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3756{
3757 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3758 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3759}
3760
3761static VALUE
3762ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3763{
3764 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3765 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3766}
3767
3768static VALUE
3769ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3770{
3771 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3772 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3773}
3774
3775static inline int
3776vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3777{
3778 const int ov_flags = RAISED_STACKOVERFLOW;
3779 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3780 if (rb_ec_raised_p(ec, ov_flags)) {
3781 rb_ec_raised_reset(ec, ov_flags);
3782 return TRUE;
3783 }
3784 return FALSE;
3785}
3786
3787#define CHECK_CFP_CONSISTENCY(func) \
3788 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3789 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3790
3791static inline
3792const rb_method_cfunc_t *
3793vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3794{
3795#if VM_DEBUG_VERIFY_METHOD_CACHE
3796 switch (me->def->type) {
3797 case VM_METHOD_TYPE_CFUNC:
3798 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3799 break;
3800# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3801 METHOD_BUG(ISEQ);
3802 METHOD_BUG(ATTRSET);
3803 METHOD_BUG(IVAR);
3804 METHOD_BUG(BMETHOD);
3805 METHOD_BUG(ZSUPER);
3806 METHOD_BUG(UNDEF);
3807 METHOD_BUG(OPTIMIZED);
3808 METHOD_BUG(MISSING);
3809 METHOD_BUG(REFINED);
3810 METHOD_BUG(ALIAS);
3811# undef METHOD_BUG
3812 default:
3813 rb_bug("wrong method type: %d", me->def->type);
3814 }
3815#endif
3816 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3817}
3818
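// Common tail for cfunc calls: push a CFUNC frame, fire the c-call hooks,
// invoke the function through its arity-specific invoker, check cfp
// consistency, pop the frame, then fire the c-return hooks.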
3819static VALUE
3820vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3821 int argc, VALUE *argv, VALUE *stack_bottom)
3822{
3823 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3824 const struct rb_callinfo *ci = calling->cd->ci;
3825 const struct rb_callcache *cc = calling->cc;
3826 VALUE val;
3827 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3828 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3829
3830 VALUE recv = calling->recv;
3831 VALUE block_handler = calling->block_handler;
3832 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3833
3834 if (UNLIKELY(calling->kw_splat)) {
3835 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3836 }
3837
3838 VM_ASSERT(reg_cfp == ec->cfp);
3839
3840 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3841 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3842
3843 vm_push_frame(ec, NULL, frame_type, recv,
3844 block_handler, (VALUE)me,
3845 0, ec->cfp->sp, 0, 0);
3846
3847 int len = cfunc->argc;
3848 if (len >= 0) rb_check_arity(argc, len, len);
3849
3850 reg_cfp->sp = stack_bottom;
3851 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3852
3853 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3854
3855 rb_vm_pop_frame(ec);
3856
3857 VM_ASSERT(ec->cfp->sp == stack_bottom);
3858
3859 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3860 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3861
3862 return val;
3863}
3864
3865// Push a C method frame for a given cme. This is called when JIT code skipped
3866// pushing a frame but the C method reached a point where a frame is needed.
3867void
3868rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3869{
3870 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3871 rb_execution_context_t *ec = GET_EC();
3872 VALUE *sp = ec->cfp->sp;
3873 VALUE recv = *(sp - recv_idx - 1);
3874 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3875 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3876#if VM_CHECK_MODE > 0
3877 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3878 *(GET_EC()->cfp->sp) = Qfalse;
3879#endif
3880 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3881}
3882
3883// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3884bool
3885rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3886{
3887 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3888}
3889
3890static VALUE
3891vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3892{
3893 int argc = calling->argc;
3894 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3895 VALUE *argv = &stack_bottom[1];
3896
3897 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3898}
3899
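// General cfunc path: CALLER_SETUP_ARG may spill the arguments into a hidden
// heap array; if the call shape stays simple, the plain with-frame handler is
// installed as the fastpath for subsequent calls at this site.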
3900static VALUE
3901vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3902{
3903 const struct rb_callinfo *ci = calling->cd->ci;
3904 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3905
3906 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3907 VALUE argv_ary;
3908 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3909 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3910 int argc = RARRAY_LENINT(argv_ary);
3911 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3912 VALUE *stack_bottom = reg_cfp->sp - 2;
3913
3914 VM_ASSERT(calling->argc == 1);
3915 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3916 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3917
3918 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3919 }
3920 else {
3921 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3922
3923 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3924 }
3925}
3926
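// Copies a splatted (hidden) argument array back onto the VM stack so the
// with-frame path can be used directly; falls back to vm_call_cfunc_other when
// the element count exceeds VM_ARGC_STACK_MAX.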
3927static inline VALUE
3928vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3929{
3930 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3931 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3932
3933 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3934 return vm_call_cfunc_other(ec, reg_cfp, calling);
3935 }
3936
3937 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3938 calling->kw_splat = 0;
3939 int i;
3940 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3941 VALUE *sp = stack_bottom;
3942 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3943 for(i = 0; i < argc; i++) {
3944 *++sp = argv[i];
3945 }
3946 reg_cfp->sp = sp+1;
3947
3948 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3949}
3950
3951static inline VALUE
3952vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3953{
3954 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3955 VALUE argv_ary = reg_cfp->sp[-1];
3956 int argc = RARRAY_LENINT(argv_ary);
3957 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3958 VALUE last_hash;
3959 int argc_offset = 0;
3960
3961 if (UNLIKELY(argc > 0 &&
3962 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3963 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3964 if (!RHASH_EMPTY_P(last_hash)) {
3965 return vm_call_cfunc_other(ec, reg_cfp, calling);
3966 }
3967 argc_offset++;
3968 }
3969 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3970}
3971
3972static inline VALUE
3973vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3974{
3975 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3976 VALUE keyword_hash = reg_cfp->sp[-1];
3977
3978 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3979 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3980 }
3981
3982 return vm_call_cfunc_other(ec, reg_cfp, calling);
3983}
3984
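// Entry point for cfunc dispatch: the common splat shapes f(*a) and
// f(*a, **kw) get dedicated handlers; everything else goes through
// vm_call_cfunc_other.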
3985static VALUE
3986vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3987{
3988 const struct rb_callinfo *ci = calling->cd->ci;
3989 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3990
3991 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3992 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3993 // f(*a)
3994 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3995 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3996 }
3997 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3998 // f(*a, **kw)
3999 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4000 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4001 }
4002 }
4003
4004 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4005 return vm_call_cfunc_other(ec, reg_cfp, calling);
4006}
4007
4008static VALUE
4009vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4010{
4011 const struct rb_callcache *cc = calling->cc;
4012 RB_DEBUG_COUNTER_INC(ccf_ivar);
4013 cfp->sp -= 1;
4014 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4015 return ivar;
4016}
4017
4018static VALUE
4019vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
4020{
4021 RB_DEBUG_COUNTER_INC(ccf_attrset);
4022 VALUE val = *(cfp->sp - 1);
4023 cfp->sp -= 2;
4024 attr_index_t index;
4025 shape_id_t dest_shape_id;
4026 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
4027 ID id = vm_cc_cme(cc)->def->body.attr.id;
4028 rb_check_frozen(obj);
4029 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4030 if (UNDEF_P(res)) {
4031 switch (BUILTIN_TYPE(obj)) {
4032 case T_OBJECT:
4033 case T_CLASS:
4034 case T_MODULE:
4035 break;
4036 default:
4037 {
4038 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4039 if (!UNDEF_P(res)) {
4040 return res;
4041 }
4042 }
4043 }
4044 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4045 }
4046 return res;
4047}
4048
4049static VALUE
4050vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4051{
4052 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4053}
4054
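// Invokes a bmethod (a method defined with define_method). The backing Proc
// must be shareable or have been defined in the calling Ractor.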
4055static inline VALUE
4056vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4057{
4058 rb_proc_t *proc;
4059 VALUE val;
4060 const struct rb_callcache *cc = calling->cc;
4061 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4062 VALUE procv = cme->def->body.bmethod.proc;
4063
4064 if (!RB_OBJ_SHAREABLE_P(procv) &&
4065 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4066 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4067 }
4068
4069 /* control block frame */
4070 GetProcPtr(procv, proc);
4071 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4072
4073 return val;
4074}
4075
4076static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4077
4078static VALUE
4079vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4080{
4081 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4082
4083 const struct rb_callcache *cc = calling->cc;
4084 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4085 VALUE procv = cme->def->body.bmethod.proc;
4086
4087 if (!RB_OBJ_SHAREABLE_P(procv) &&
4088 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4089 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4090 }
4091
4092 rb_proc_t *proc;
4093 GetProcPtr(procv, proc);
4094 const struct rb_block *block = &proc->block;
4095
4096 while (vm_block_type(block) == block_type_proc) {
4097 block = vm_proc_block(block->as.proc);
4098 }
4099 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4100
4101 const struct rb_captured_block *captured = &block->as.captured;
4102 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4103 VALUE * const argv = cfp->sp - calling->argc;
4104 const int arg_size = ISEQ_BODY(iseq)->param.size;
4105
4106 int opt_pc;
4107 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4108 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4109 }
4110 else {
4111 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4112 }
4113
4114 cfp->sp = argv - 1; // -1 for the receiver
4115
4116 vm_push_frame(ec, iseq,
4117 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4118 calling->recv,
4119 VM_GUARDED_PREV_EP(captured->ep),
4120 (VALUE)cme,
4121 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4122 argv + arg_size,
4123 ISEQ_BODY(iseq)->local_table_size - arg_size,
4124 ISEQ_BODY(iseq)->stack_max);
4125
4126 return Qundef;
4127}
4128
4129static VALUE
4130vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4131{
4132 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4133
4134 VALUE *argv;
4135 int argc;
4136 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4137 if (UNLIKELY(calling->heap_argv)) {
4138 argv = RARRAY_PTR(calling->heap_argv);
4139 cfp->sp -= 2;
4140 }
4141 else {
4142 argc = calling->argc;
4143 argv = ALLOCA_N(VALUE, argc);
4144 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4145 cfp->sp += - argc - 1;
4146 }
4147
4148 return vm_call_bmethod_body(ec, calling, argv);
4149}
4150
4151static VALUE
4152vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4153{
4154 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4155
4156 const struct rb_callcache *cc = calling->cc;
4157 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4158 VALUE procv = cme->def->body.bmethod.proc;
4159 rb_proc_t *proc;
4160 GetProcPtr(procv, proc);
4161 const struct rb_block *block = &proc->block;
4162
4163 while (vm_block_type(block) == block_type_proc) {
4164 block = vm_proc_block(block->as.proc);
4165 }
4166 if (vm_block_type(block) == block_type_iseq) {
4167 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4168 return vm_call_iseq_bmethod(ec, cfp, calling);
4169 }
4170
4171 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4172 return vm_call_noniseq_bmethod(ec, cfp, calling);
4173}
4174
4175VALUE
4176rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4177{
4178 VALUE klass = current_class;
4179
4180 /* for a prepended Module, start the search from the origin (cover) class */
4181 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4182 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4183 klass = RBASIC_CLASS(klass);
4184 }
4185
4186 while (RTEST(klass)) {
4187 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4188 if (owner == target_owner) {
4189 return klass;
4190 }
4191 klass = RCLASS_SUPER(klass);
4192 }
4193
4194 return current_class; /* maybe module function */
4195}
4196
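// Resolves the original method entry behind an alias. An original entry with
// no defined_class (a module method) is complemented with the class found via
// the alias's defined_class, and the result is cached back into the alias
// definition when that definition is not shared.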
4197static const rb_callable_method_entry_t *
4198aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4199{
4200 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4201 const rb_callable_method_entry_t *cme;
4202
4203 if (orig_me->defined_class == 0) {
4204 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4205 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4206 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4207
4208 if (me->def->reference_count == 1) {
4209 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4210 }
4211 else {
4212 rb_method_definition_t *def =
4213 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4214 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4215 }
4216 }
4217 else {
4218 cme = (const rb_callable_method_entry_t *)orig_me;
4219 }
4220
4221 VM_ASSERT(callable_method_entry_p(cme));
4222 return cme;
4223}
4224
4225const rb_callable_method_entry_t *
4226rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4227{
4228 return aliased_callable_method_entry(me);
4229}
4230
4231static VALUE
4232vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4233{
4234 calling->cc = &VM_CC_ON_STACK(Qundef,
4235 vm_call_general,
4236 {{0}},
4237 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4238
4239 return vm_call_method_each_type(ec, cfp, calling);
4240}
4241
4242static enum method_missing_reason
4243ci_missing_reason(const struct rb_callinfo *ci)
4244{
4245 enum method_missing_reason stat = MISSING_NOENTRY;
4246 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4247 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4248 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4249 return stat;
4250}
4251
4252static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4253
4254static VALUE
4255vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4256 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4257{
4258 ASSUME(calling->argc >= 0);
4259
4260 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4261 int argc = calling->argc;
4262 VALUE recv = calling->recv;
4263 VALUE klass = CLASS_OF(recv);
4264 ID mid = rb_check_id(&symbol);
4265 flags |= VM_CALL_OPT_SEND;
4266
4267 if (UNLIKELY(! mid)) {
4268 mid = idMethodMissing;
4269 missing_reason = ci_missing_reason(ci);
4270 ec->method_missing_reason = missing_reason;
4271
4272 VALUE argv_ary;
4273 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4274 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4275 rb_ary_unshift(argv_ary, symbol);
4276
4277 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4278 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4279 VALUE exc = rb_make_no_method_exception(
4280 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4281
4282 rb_exc_raise(exc);
4283 }
4284 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4285 }
4286 else {
4287 /* E.g. when argc == 2
4288 *
4289 * | | | | TOPN
4290 * | | +------+
4291 * | | +---> | arg1 | 0
4292 * +------+ | +------+
4293 * | arg1 | -+ +-> | arg0 | 1
4294 * +------+ | +------+
4295 * | arg0 | ---+ | sym | 2
4296 * +------+ +------+
4297 * | recv | | recv | 3
4298 * --+------+--------+------+------
4299 */
4300 int i = argc;
4301 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4302 INC_SP(1);
4303 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4304 argc = ++calling->argc;
4305
4306 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4307 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4308 TOPN(i) = symbol;
4309 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4310 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4311 VALUE exc = rb_make_no_method_exception(
4312 rb_eNoMethodError, 0, recv, argc, argv, priv);
4313
4314 rb_exc_raise(exc);
4315 }
4316 else {
4317 TOPN(i) = rb_str_intern(symbol);
4318 }
4319 }
4320 }
4321
4322 struct rb_forwarding_call_data new_fcd = {
4323 .cd = {
4324 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4325 .cc = NULL,
4326 },
4327 .caller_ci = NULL,
4328 };
4329
4330 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4331 calling->cd = &new_fcd.cd;
4332 }
4333 else {
4334 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4335 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4336 new_fcd.caller_ci = caller_ci;
4337 calling->cd = (struct rb_call_data *)&new_fcd;
4338 }
4339 calling->cc = &VM_CC_ON_STACK(klass,
4340 vm_call_general,
4341 { .method_missing_reason = missing_reason },
4342 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4343
4344 if (flags & VM_CALL_FCALL) {
4345 return vm_call_method(ec, reg_cfp, calling);
4346 }
4347
4348 const struct rb_callcache *cc = calling->cc;
4349 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4350
4351 if (vm_cc_cme(cc) != NULL) {
4352 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4353 case METHOD_VISI_PUBLIC: /* likely */
4354 return vm_call_method_each_type(ec, reg_cfp, calling);
4355 case METHOD_VISI_PRIVATE:
4356 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4357 break;
4358 case METHOD_VISI_PROTECTED:
4359 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4360 break;
4361 default:
4362 VM_UNREACHABLE(vm_call_method);
4363 }
4364 return vm_call_method_missing(ec, reg_cfp, calling);
4365 }
4366
4367 return vm_call_method_nome(ec, reg_cfp, calling);
4368}
4369
4370static VALUE
4371vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4372{
4373 const struct rb_callinfo *ci = calling->cd->ci;
4374 int i;
4375 VALUE sym;
4376
4377 i = calling->argc - 1;
4378
4379 if (calling->argc == 0) {
4380 rb_raise(rb_eArgError, "no method name given");
4381 }
4382
4383 sym = TOPN(i);
4384 /* E.g. when i == 2
4385 *
4386 * | | | | TOPN
4387 * +------+ | |
4388 * | arg1 | ---+ | | 0
4389 * +------+ | +------+
4390 * | arg0 | -+ +-> | arg1 | 1
4391 * +------+ | +------+
4392 * | sym | +---> | arg0 | 2
4393 * +------+ +------+
4394 * | recv | | recv | 3
4395 * --+------+--------+------+------
4396 */
4397 /* shift arguments */
4398 if (i > 0) {
4399 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4400 }
4401 calling->argc -= 1;
4402 DEC_SP(1);
4403
4404 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4405}
4406
4407static VALUE
4408vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4409{
4410 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4411 const struct rb_callinfo *ci = calling->cd->ci;
4412 int flags = VM_CALL_FCALL;
4413 VALUE sym;
4414
4415 VALUE argv_ary;
4416 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4417 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4418 sym = rb_ary_shift(argv_ary);
4419 flags |= VM_CALL_ARGS_SPLAT;
4420 if (calling->kw_splat) {
4421 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4422 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4423 calling->kw_splat = 0;
4424 }
4425 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4426 }
4427
4428 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4429 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4430}
4431
4432static VALUE
4433vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4434{
4435 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4436 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4437}
4438
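// Optimized dispatch for send/__send__: call shapes that need the arguments
// re-materialized (forwarding, a lone splat, splat plus kw splat, or
// all-keyword calls) take the complex path; otherwise the method-name symbol
// is simply shifted off the stack in place.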
4439static VALUE
4440vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4441{
4442 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4443
4444 const struct rb_callinfo *ci = calling->cd->ci;
4445 int flags = vm_ci_flag(ci);
4446
4447 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4448 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4449 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4450 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4451 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4452 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4453 }
4454
4455 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4456 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4457}
4458
4459static VALUE
4460vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4461 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4462{
4463 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4464
4465 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4466 unsigned int argc, flag;
4467
4468 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4469 argc = ++calling->argc;
4470
4471 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4472 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4473 vm_check_canary(ec, reg_cfp->sp);
4474 if (argc > 1) {
4475 MEMMOVE(argv+1, argv, VALUE, argc-1);
4476 }
4477 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4478 INC_SP(1);
4479
4480 ec->method_missing_reason = reason;
4481
4482 struct rb_forwarding_call_data new_fcd = {
4483 .cd = {
4484 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4485 .cc = NULL,
4486 },
4487 .caller_ci = NULL,
4488 };
4489
4490 if (!(flag & VM_CALL_FORWARDING)) {
4491 calling->cd = &new_fcd.cd;
4492 }
4493 else {
4494 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4495 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4496 new_fcd.caller_ci = caller_ci;
4497 calling->cd = (struct rb_call_data *)&new_fcd;
4498 }
4499
4500 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4501 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4502 return vm_call_method(ec, reg_cfp, calling);
4503}
4504
4505static VALUE
4506vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4507{
4508 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4509}
4510
4511static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4512static VALUE
4513vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4514{
4515 klass = RCLASS_SUPER(klass);
4516
4517 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4518 if (cme == NULL) {
4519 return vm_call_method_nome(ec, cfp, calling);
4520 }
4521 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4522 cme->def->body.refined.orig_me) {
4523 cme = refined_method_callable_without_refinement(cme);
4524 }
4525
4526 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4527
4528 return vm_call_method_each_type(ec, cfp, calling);
4529}
4530
4531static inline VALUE
4532find_refinement(VALUE refinements, VALUE klass)
4533{
4534 if (NIL_P(refinements)) {
4535 return Qnil;
4536 }
4537 return rb_hash_lookup(refinements, klass);
4538}
4539
4540PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4541static rb_control_frame_t *
4542current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4543{
4544 rb_control_frame_t *top_cfp = cfp;
4545
4546 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4547 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4548
4549 do {
4550 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4551 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4552 /* TODO: orphan block */
4553 return top_cfp;
4554 }
4555 } while (cfp->iseq != local_iseq);
4556 }
4557 return cfp;
4558}
4559
4560static const rb_callable_method_entry_t *
4561refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4562{
4563 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4564 const rb_callable_method_entry_t *cme;
4565
4566 if (orig_me->defined_class == 0) {
4567 cme = NULL;
4568 rb_notimplement();
4569 }
4570 else {
4571 cme = (const rb_callable_method_entry_t *)orig_me;
4572 }
4573
4574 VM_ASSERT(callable_method_entry_p(cme));
4575
4576 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4577 cme = NULL;
4578 }
4579
4580 return cme;
4581}
4582
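// Walks the cref chain for an active refinement of the method's owner. For
// super calls, a refinement whose definition matches the currently executing
// method is skipped so that super does not re-enter the same refinement.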
4583static const rb_callable_method_entry_t *
4584search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4585{
4586 ID mid = vm_ci_mid(calling->cd->ci);
4587 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4588 const struct rb_callcache * const cc = calling->cc;
4589 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4590
4591 for (; cref; cref = CREF_NEXT(cref)) {
4592 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4593 if (NIL_P(refinement)) continue;
4594
4595 const rb_callable_method_entry_t *const ref_me =
4596 rb_callable_method_entry(refinement, mid);
4597
4598 if (ref_me) {
4599 if (vm_cc_call(cc) == vm_call_super_method) {
4600 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4601 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4602 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4603 continue;
4604 }
4605 }
4606
4607 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4608 cme->def != ref_me->def) {
4609 cme = ref_me;
4610 }
4611 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4612 return cme;
4613 }
4614 }
4615 else {
4616 return NULL;
4617 }
4618 }
4619
4620 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4621 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4622 }
4623 else {
4624 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4625 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4626 return cme;
4627 }
4628}
4629
4630static VALUE
4631vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4632{
4633 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4634
4635 if (ref_cme) {
4636 if (calling->cd->cc) {
4637 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4638 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4639 return vm_call_method(ec, cfp, calling);
4640 }
4641 else {
4642 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4643 calling->cc = ref_cc;
4644 return vm_call_method(ec, cfp, calling);
4645 }
4646 }
4647 else {
4648 return vm_call_method_nome(ec, cfp, calling);
4649 }
4650}
4651
4652static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4653
4654NOINLINE(static VALUE
4655 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4656 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4657
4658static VALUE
4659vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4660 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4661{
4662 int argc = calling->argc;
4663
4664 /* remove self */
4665 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4666 DEC_SP(1);
4667
4668 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4669}
4670
4671static VALUE
4672vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4673{
4674 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4675
4676 const struct rb_callinfo *ci = calling->cd->ci;
4677 VALUE procval = calling->recv;
4678 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4679}
4680
4681static VALUE
4682vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4683{
4684 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4685
4686 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4687 const struct rb_callinfo *ci = calling->cd->ci;
4688
4689 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4690 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4691 }
4692 else {
4693 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4694 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4695 return vm_call_general(ec, reg_cfp, calling);
4696 }
4697}
4698
4699static VALUE
4700vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4701{
4702 VALUE recv = calling->recv;
4703
4704 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4705 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4706 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4707
4708 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4709 return internal_RSTRUCT_GET(recv, off);
4710}
4711
4712static VALUE
4713vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4714{
4715 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4716
4717 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4718 reg_cfp->sp -= 1;
4719 return ret;
4720}
4721
4722static VALUE
4723vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4724{
4725 VALUE recv = calling->recv;
4726
4727 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4728 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4729 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4730
4731 rb_check_frozen(recv);
4732
4733 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4734 internal_RSTRUCT_SET(recv, off, val);
4735
4736 return val;
4737}
4738
4739static VALUE
4740vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4741{
4742 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4743
4744 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4745 reg_cfp->sp -= 2;
4746 return ret;
4747}
4748
4749NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4750 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4751
4752#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4753 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4754 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4755 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4756 var = func; \
4757 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4758 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4759 } \
4760 else { \
4761 nohook; \
4762 var = func; \
4763 }
4764
4765static VALUE
4766vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4767 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4768{
4769 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4770 case OPTIMIZED_METHOD_TYPE_SEND:
4771 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4772 return vm_call_opt_send(ec, cfp, calling);
4773 case OPTIMIZED_METHOD_TYPE_CALL:
4774 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4775 return vm_call_opt_call(ec, cfp, calling);
4776 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4777 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4778 return vm_call_opt_block_call(ec, cfp, calling);
4779 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4780 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4781 rb_check_arity(calling->argc, 0, 0);
4782
4783 VALUE v;
4784 VM_CALL_METHOD_ATTR(v,
4785 vm_call_opt_struct_aref(ec, cfp, calling),
4786 set_vm_cc_ivar(cc); \
4787 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4788 return v;
4789 }
4790 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4791 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4792 rb_check_arity(calling->argc, 1, 1);
4793
4794 VALUE v;
4795 VM_CALL_METHOD_ATTR(v,
4796 vm_call_opt_struct_aset(ec, cfp, calling),
4797 set_vm_cc_ivar(cc); \
4798 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4799 return v;
4800 }
4801 default:
4802 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4803 }
4804}
4805
4806static VALUE
4807vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4808{
4809 const struct rb_callinfo *ci = calling->cd->ci;
4810 const struct rb_callcache *cc = calling->cc;
4811 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4812 VALUE v;
4813
4814 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4815
4816 switch (cme->def->type) {
4817 case VM_METHOD_TYPE_ISEQ:
4818 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4819 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4820 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4821 }
4822 else {
4823 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4824 return vm_call_iseq_setup(ec, cfp, calling);
4825 }
4826
4827 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4828 case VM_METHOD_TYPE_CFUNC:
4829 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4830 return vm_call_cfunc(ec, cfp, calling);
4831
4832 case VM_METHOD_TYPE_ATTRSET:
4833 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4834
4835 rb_check_arity(calling->argc, 1, 1);
4836
4837 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4838
4839 if (vm_cc_markable(cc)) {
4840 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4841 VM_CALL_METHOD_ATTR(v,
4842 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4843 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4844 }
4845 else {
4846 cc = &((struct rb_callcache) {
4847 .flags = T_IMEMO |
4848 (imemo_callcache << FL_USHIFT) |
4849 VM_CALLCACHE_UNMARKABLE |
4850 VM_CALLCACHE_ON_STACK,
4851 .klass = cc->klass,
4852 .cme_ = cc->cme_,
4853 .call_ = cc->call_,
4854 .aux_ = {
4855 .attr = {
4856 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4857 }
4858 },
4859 });
4860
4861 VM_CALL_METHOD_ATTR(v,
4862 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4863 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4864 }
4865 return v;
4866
4867 case VM_METHOD_TYPE_IVAR:
4868 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4869 rb_check_arity(calling->argc, 0, 0);
4870 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4871 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4872 VM_CALL_METHOD_ATTR(v,
4873 vm_call_ivar(ec, cfp, calling),
4874 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4875 return v;
4876
4877 case VM_METHOD_TYPE_MISSING:
4878 vm_cc_method_missing_reason_set(cc, 0);
4879 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4880 return vm_call_method_missing(ec, cfp, calling);
4881
4882 case VM_METHOD_TYPE_BMETHOD:
4883 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4884 return vm_call_bmethod(ec, cfp, calling);
4885
4886 case VM_METHOD_TYPE_ALIAS:
4887 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4888 return vm_call_alias(ec, cfp, calling);
4889
4890 case VM_METHOD_TYPE_OPTIMIZED:
4891 return vm_call_optimized(ec, cfp, calling, ci, cc);
4892
4893 case VM_METHOD_TYPE_UNDEF:
4894 break;
4895
4896 case VM_METHOD_TYPE_ZSUPER:
4897 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4898
4899 case VM_METHOD_TYPE_REFINED:
4900 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4901 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4902 return vm_call_refined(ec, cfp, calling);
4903 }
4904
4905 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4906}
4907
4908NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4909
4910static VALUE
4911vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4912{
4913 /* method missing */
4914 const struct rb_callinfo *ci = calling->cd->ci;
4915 const int stat = ci_missing_reason(ci);
4916
4917 if (vm_ci_mid(ci) == idMethodMissing) {
4918 if (UNLIKELY(calling->heap_argv)) {
4919 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4920 }
4921 else {
4922 rb_control_frame_t *reg_cfp = cfp;
4923 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4924 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4925 }
4926 }
4927 else {
4928 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4929 }
4930}
4931
4932/* Protected method calls and super invocations need to check that the receiver
4933 * (self for super) inherits the module on which the method is defined.
4934 * In the case of refinements, it should consider the original class, not the
4935 * refinement.
4936 */
4937static VALUE
4938vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4939{
4940 VALUE defined_class = me->defined_class;
4941 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4942 return NIL_P(refined_class) ? defined_class : refined_class;
4943}
4944
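// Central visibility dispatch: public calls go straight through; private
// requires an FCALL (implicit receiver); protected requires self to be a kind
// of the defining class, checked via an unmarkable on-stack copy of the call
// cache so the result is not cached globally.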
4945static inline VALUE
4946vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4947{
4948 const struct rb_callinfo *ci = calling->cd->ci;
4949 const struct rb_callcache *cc = calling->cc;
4950
4951 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4952
4953 if (vm_cc_cme(cc) != NULL) {
4954 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4955 case METHOD_VISI_PUBLIC: /* likely */
4956 return vm_call_method_each_type(ec, cfp, calling);
4957
4958 case METHOD_VISI_PRIVATE:
4959 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4960 enum method_missing_reason stat = MISSING_PRIVATE;
4961 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4962
4963 vm_cc_method_missing_reason_set(cc, stat);
4964 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4965 return vm_call_method_missing(ec, cfp, calling);
4966 }
4967 return vm_call_method_each_type(ec, cfp, calling);
4968
4969 case METHOD_VISI_PROTECTED:
4970 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4971 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4972 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4973 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4974 return vm_call_method_missing(ec, cfp, calling);
4975 }
4976 else {
4977 /* caching method info to dummy cc */
4978 VM_ASSERT(vm_cc_cme(cc) != NULL);
4979 struct rb_callcache cc_on_stack = *cc;
4980 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4981 calling->cc = &cc_on_stack;
4982 return vm_call_method_each_type(ec, cfp, calling);
4983 }
4984 }
4985 return vm_call_method_each_type(ec, cfp, calling);
4986
4987 default:
4988 rb_bug("unreachable");
4989 }
4990 }
4991 else {
4992 return vm_call_method_nome(ec, cfp, calling);
4993 }
4994}
4995
4996static VALUE
4997vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4998{
4999 RB_DEBUG_COUNTER_INC(ccf_general);
5000 return vm_call_method(ec, reg_cfp, calling);
5001}
5002
5003void
5004rb_vm_cc_general(const struct rb_callcache *cc)
5005{
5006 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
5007 VM_ASSERT(cc != vm_cc_empty());
5008
5009 *(vm_call_handler *)&cc->call_ = vm_call_general;
5010}
5011
5012static VALUE
5013vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5014{
5015 RB_DEBUG_COUNTER_INC(ccf_super_method);
5016
5017 // This check is introduced to make this function different from `vm_call_general`, because some
5018 // compilers (we observed this with VC) can merge functions with identical bodies, making their addresses the same.
5019 // The address of `vm_call_super_method` is used in `search_refined_method`, so it must stay distinct.
5020 if (ec == NULL) rb_bug("unreachable");
5021
5022 /* this check is required to distinguish this function from the others. */
5023 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
5024 return vm_call_method(ec, reg_cfp, calling);
5025}
5026
5027/* super */
5028
5029static inline VALUE
5030vm_search_normal_superclass(VALUE klass)
5031{
5032 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5033 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5034 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5035 klass = RBASIC(klass)->klass;
5036 }
5037 klass = RCLASS_ORIGIN(klass);
5038 return RCLASS_SUPER(klass);
5039}
5040
5041NORETURN(static void vm_super_outside(void));
5042
5043static void
5044vm_super_outside(void)
5045{
5046 rb_raise(rb_eNoMethodError, "super called outside of method");
5047}
5048
5049static const struct rb_callcache *
5050empty_cc_for_super(void)
5051{
5052 return &vm_empty_cc_for_super;
5053}
5054
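// Resolves the target of super: validates the calling context, rewrites the
// call info to the original method id, then searches from the normal
// superclass. Fastpaths that are unsafe for super (refined methods, attr
// entries) are invalidated back to vm_call_super_method.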
5055static const struct rb_callcache *
5056vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5057{
5058 VALUE current_defined_class;
5059 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5060
5061 if (!me) {
5062 vm_super_outside();
5063 }
5064
5065 current_defined_class = vm_defined_class_for_protected_call(me);
5066
5067 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5068 reg_cfp->iseq != method_entry_iseqptr(me) &&
5069 !rb_obj_is_kind_of(recv, current_defined_class)) {
5070 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5071 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5072
5073 if (m) { /* not bound UnboundMethod */
5074 rb_raise(rb_eTypeError,
5075 "self has wrong type to call super in this context: "
5076 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5077 rb_obj_class(recv), m);
5078 }
5079 }
5080
5081 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5082 rb_raise(rb_eRuntimeError,
5083 "implicit argument passing of super from method defined"
5084 " by define_method() is not supported."
5085 " Specify all arguments explicitly.");
5086 }
5087
5088 ID mid = me->def->original_id;
5089
5090 if (!vm_ci_markable(cd->ci)) {
5091 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5092 }
5093 else {
5094 // update iseq. really? (TODO)
5095 cd->ci = vm_ci_new_runtime(mid,
5096 vm_ci_flag(cd->ci),
5097 vm_ci_argc(cd->ci),
5098 vm_ci_kwarg(cd->ci));
5099
5100 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5101 }
5102
5103 const struct rb_callcache *cc;
5104
5105 VALUE klass = vm_search_normal_superclass(me->defined_class);
5106
5107 if (!klass) {
5108 /* bound instance method of module */
5109 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5110 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5111 }
5112 else {
5113 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5114 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5115
5116 // define_method can cache for a different method id
5117 if (cached_cme == NULL) {
5118 // empty_cc_for_super is not a markable object
5119 cd->cc = empty_cc_for_super();
5120 }
5121 else if (cached_cme->called_id != mid) {
5122 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5123 if (cme) {
5124 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5125 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5126 }
5127 else {
5128 cd->cc = cc = empty_cc_for_super();
5129 }
5130 }
5131 else {
5132 switch (cached_cme->def->type) {
5133 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5134 case VM_METHOD_TYPE_REFINED:
5135 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5136 case VM_METHOD_TYPE_ATTRSET:
5137 case VM_METHOD_TYPE_IVAR:
5138 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5139 break;
5140 default:
5141 break; // use fastpath
5142 }
5143 }
5144 }
5145
5146 VM_ASSERT((vm_cc_cme(cc), true));
5147
5148 return cc;
5149}
5150
5151/* yield */
5152
5153static inline int
5154block_proc_is_lambda(const VALUE procval)
5155{
5156 rb_proc_t *proc;
5157
5158 if (procval) {
5159 GetProcPtr(procval, proc);
5160 return proc->is_lambda;
5161 }
5162 else {
5163 return 0;
5164 }
5165}
5166
5167static inline const rb_namespace_t *
5168block_proc_namespace(const VALUE procval)
5169{
5170 rb_proc_t *proc;
5171
5172 if (procval) {
5173 GetProcPtr(procval, proc);
5174 return proc->ns;
5175 }
5176 else {
5177 return NULL;
5178 }
5179}
5180
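// Yields to a C-level (ifunc) block. With non-lambda semantics only the first
// argument is passed as `arg`, but the full argc/argv still reaches the ifunc
// alongside it.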
5181static VALUE
5182vm_yield_with_cfunc(rb_execution_context_t *ec,
5183 const struct rb_captured_block *captured,
5184 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5185 const rb_callable_method_entry_t *me)
5186{
5187 int is_lambda = FALSE; /* TODO */
5188 VALUE val, arg, blockarg;
5189 int frame_flag;
5190 const struct vm_ifunc *ifunc = captured->code.ifunc;
5191
5192 if (is_lambda) {
5193 arg = rb_ary_new4(argc, argv);
5194 }
5195 else if (argc == 0) {
5196 arg = Qnil;
5197 }
5198 else {
5199 arg = argv[0];
5200 }
5201
5202 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5203
5204 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5205 if (kw_splat) {
5206 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5207 }
5208
5209 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5210 frame_flag,
5211 self,
5212 VM_GUARDED_PREV_EP(captured->ep),
5213 (VALUE)me,
5214 0, ec->cfp->sp, 0, 0);
5215 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5216 rb_vm_pop_frame(ec);
5217
5218 return val;
5219}
5220
5221VALUE
5222rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5223{
5224 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5225}
5226
5227static VALUE
5228vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5229{
5230 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5231}
5232
5233static inline int
5234vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5235{
5236 int i;
5237 long len = RARRAY_LEN(ary);
5238
5239 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5240
5241 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5242 argv[i] = RARRAY_AREF(ary, i);
5243 }
5244
5245 return i;
5246}
5247
5248static inline VALUE
5249vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5250{
5251 VALUE ary, arg0 = argv[0];
5252 ary = rb_check_array_type(arg0);
5253#if 0
5254 argv[0] = arg0;
5255#else
5256 VM_ASSERT(argv[0] == arg0);
5257#endif
5258 return ary;
5259}
5260
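// Block-argument setup for simple iseqs: a lone Array argument is
// auto-splatted into the leading parameters (Proc semantics), missing
// arguments are filled with nil and extras are truncated; lambdas take the
// strict method-arity path instead.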
5261static int
5262vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5263{
5264 if (rb_simple_iseq_p(iseq)) {
5265 rb_control_frame_t *cfp = ec->cfp;
5266 VALUE arg0;
5267
5268 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5269
5270 if (arg_setup_type == arg_setup_block &&
5271 calling->argc == 1 &&
5272 ISEQ_BODY(iseq)->param.flags.has_lead &&
5273 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5274 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5275 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5276 }
5277
5278 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5279 if (arg_setup_type == arg_setup_block) {
5280 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5281 int i;
5282 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5283 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5284 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5285 }
5286 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5287 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5288 }
5289 }
5290 else {
5291 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5292 }
5293 }
5294
5295 return 0;
5296 }
5297 else {
5298 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5299 }
5300}
5301
5302static int
5303vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5304{
5305 struct rb_calling_info calling_entry, *calling;
5306
5307 calling = &calling_entry;
5308 calling->argc = argc;
5309 calling->block_handler = block_handler;
5310 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5311 calling->recv = Qundef;
5312 calling->heap_argv = 0;
5313 calling->cc = NULL;
5314 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5315
5316 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5317}
5318
5319/* ruby iseq -> ruby block */
5320
5321static VALUE
5322vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5323 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5324 bool is_lambda, VALUE block_handler)
5325{
5326 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5327 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5328 const int arg_size = ISEQ_BODY(iseq)->param.size;
5329 VALUE * const rsp = GET_SP() - calling->argc;
5330 VALUE * const argv = rsp;
5331 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5332 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5333
5334 SET_SP(rsp);
5335
5336 if (calling->proc_ns) {
5337 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5338 }
5339
5340 vm_push_frame(ec, iseq,
5341 frame_flag,
5342 captured->self,
5343 VM_GUARDED_PREV_EP(captured->ep), 0,
5344 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5345 rsp + arg_size,
5346 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5347
5348 return Qundef;
5349}
5350
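// Invokes a Symbol block handler (e.g. &:to_s): the first argument becomes
// the receiver, and complex argument shapes are normalized (possibly via a
// heap argv) before delegating to vm_call_symbol.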
5351static VALUE
5352vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5353 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5354 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5355{
5356 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5357 int flags = vm_ci_flag(ci);
5358
5359 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5360 ((calling->argc == 0) ||
5361 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5362 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5363 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5364 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5365 flags = 0;
5366 if (UNLIKELY(calling->heap_argv)) {
5367#if VM_ARGC_STACK_MAX < 0
5368 if (RARRAY_LEN(calling->heap_argv) < 1) {
5369 rb_raise(rb_eArgError, "no receiver given");
5370 }
5371#endif
5372 calling->recv = rb_ary_shift(calling->heap_argv);
5373 // Modify stack to avoid cfp consistency error
5374 reg_cfp->sp++;
5375 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5376 reg_cfp->sp[-2] = calling->recv;
5377 flags |= VM_CALL_ARGS_SPLAT;
5378 }
5379 else {
5380 if (calling->argc < 1) {
5381 rb_raise(rb_eArgError, "no receiver given");
5382 }
5383 calling->recv = TOPN(--calling->argc);
5384 }
5385 if (calling->kw_splat) {
5386 flags |= VM_CALL_KW_SPLAT;
5387 }
5388 }
5389 else {
5390 if (calling->argc < 1) {
5391 rb_raise(rb_eArgError, "no receiver given");
5392 }
5393 calling->recv = TOPN(--calling->argc);
5394 }
5395
5396 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5397}
5398
5399static VALUE
5400vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5401 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5402 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5403{
5404 VALUE val;
5405 int argc;
5406 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5407 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5408 argc = calling->argc;
5409 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5410 POPN(argc); /* TODO: should put before C/yield? */
5411 return val;
5412}
5413
5414static VALUE
5415vm_proc_to_block_handler(VALUE procval)
5416{
5417 const struct rb_block *block = vm_proc_block(procval);
5418
5419 switch (vm_block_type(block)) {
5420 case block_type_iseq:
5421 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5422 case block_type_ifunc:
5423 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5424 case block_type_symbol:
5425 return VM_BH_FROM_SYMBOL(block->as.symbol);
5426 case block_type_proc:
5427 return VM_BH_FROM_PROC(block->as.proc);
5428 }
5429 VM_UNREACHABLE(vm_yield_with_proc);
5430 return Qundef;
5431}
5432
5433static VALUE
5434vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5435 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5436 bool is_lambda, VALUE block_handler)
5437{
5438 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5439 VALUE proc = VM_BH_TO_PROC(block_handler);
5440 if (!calling->proc_ns) {
5441 calling->proc_ns = block_proc_namespace(proc);
5442 }
5443 is_lambda = block_proc_is_lambda(proc);
5444 block_handler = vm_proc_to_block_handler(proc);
5445 }
5446
5447 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5448}
5449
5450static inline VALUE
5451vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5452 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5453 bool is_lambda, VALUE block_handler)
5454{
5455 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5456 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5457 bool is_lambda, VALUE block_handler);
5458
5459 switch (vm_block_handler_type(block_handler)) {
5460 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5461 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5462 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5463 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5464 default: rb_bug("vm_invoke_block: unreachable");
5465 }
5466
5467 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5468}
5469
5470static VALUE
5471vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5472{
5473 const rb_execution_context_t *ec = GET_EC();
5474 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5475 struct rb_captured_block *captured;
5476
5477 if (cfp == 0) {
5478 rb_bug("vm_make_proc_with_iseq: unreachable");
5479 }
5480
5481 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5482 captured->code.iseq = blockiseq;
5483
5484 return rb_vm_make_proc(ec, captured, rb_cProc);
5485}
5486
5487static VALUE
5488vm_once_exec(VALUE iseq)
5489{
5490 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5491 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5492}
5493
5494static VALUE
5495vm_once_clear(VALUE data)
5496{
5497 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5498 is->once.running_thread = NULL;
5499 return Qnil;
5500}
5501
5502/* defined insn */
5503
5504static bool
5505check_respond_to_missing(VALUE obj, VALUE v)
5506{
5507 VALUE args[2];
5508 VALUE r;
5509
5510 args[0] = obj; args[1] = Qfalse;
5511 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5512 if (!UNDEF_P(r) && RTEST(r)) {
5513 return true;
5514 }
5515 else {
5516 return false;
5517 }
5518}
5519
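/* Boolean core of the `defined?` keyword. op_type selects what to test
 * (ivar, gvar, cvar, constant, method, yield, zsuper, backref, ...); the
 * `defined` instruction itself maps a true result to the descriptive
 * string pushed as its operand, e.g. `defined?(@foo)` compiles to a
 * DEFINED_IVAR check that yields "instance-variable" when true. */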
5520static bool
5521vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5522{
5523 VALUE klass;
5524 enum defined_type type = (enum defined_type)op_type;
5525
5526 switch (type) {
5527 case DEFINED_IVAR:
5528 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5529 break;
5530 case DEFINED_GVAR:
5531 return rb_gvar_defined(SYM2ID(obj));
5532 break;
5533 case DEFINED_CVAR: {
5534 const rb_cref_t *cref = vm_get_cref(GET_EP());
5535 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5536 return rb_cvar_defined(klass, SYM2ID(obj));
5537 break;
5538 }
5539 case DEFINED_CONST:
5540 case DEFINED_CONST_FROM: {
5541 bool allow_nil = type == DEFINED_CONST;
5542 klass = v;
5543 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5544 break;
5545 }
5546 case DEFINED_FUNC:
5547 klass = CLASS_OF(v);
5548 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5549 break;
5550 case DEFINED_METHOD:{
5551 VALUE klass = CLASS_OF(v);
5552 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5553
5554 if (me) {
5555 switch (METHOD_ENTRY_VISI(me)) {
5556 case METHOD_VISI_PRIVATE:
5557 break;
5558 case METHOD_VISI_PROTECTED:
5559 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5560 break;
5561 }
5562 case METHOD_VISI_PUBLIC:
5563 return true;
5564 break;
5565 default:
5566 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5567 }
5568 }
5569 else {
5570 return check_respond_to_missing(obj, v);
5571 }
5572 break;
5573 }
5574 case DEFINED_YIELD:
5575 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5576 return true;
5577 }
5578 break;
5579 case DEFINED_ZSUPER:
5580 {
5581 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5582
5583 if (me) {
5584 VALUE klass = vm_search_normal_superclass(me->defined_class);
5585 if (!klass) return false;
5586
5587 ID id = me->def->original_id;
5588
5589 return rb_method_boundp(klass, id, 0);
5590 }
5591 }
5592 break;
5593 case DEFINED_REF:
5594 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5595 default:
5596 rb_bug("unimplemented defined? type (VM)");
5597 break;
5598 }
5599
5600 return false;
5601}
5602
5603bool
5604rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5605{
5606 return vm_defined(ec, reg_cfp, op_type, obj, v);
5607}
5608
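/* Walks `lv` levels up the chain of environment pointers via the prev-EP
 * links; lv == 0 returns the current EP unchanged. This is how getlocal/
 * setlocal reach variables of enclosing scopes: a variable captured two
 * blocks up is addressed as (level 2, index i). */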
5609static const VALUE *
5610vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5611{
5612 rb_num_t i;
5613 const VALUE *ep = reg_ep;
5614 for (i = 0; i < lv; i++) {
5615 ep = GET_PREV_EP(ep);
5616 }
5617 return ep;
5618}
5619
5620static VALUE
5621vm_get_special_object(const VALUE *const reg_ep,
5622 enum vm_special_object_type type)
5623{
5624 switch (type) {
5625 case VM_SPECIAL_OBJECT_VMCORE:
5626 return rb_mRubyVMFrozenCore;
5627 case VM_SPECIAL_OBJECT_CBASE:
5628 return vm_get_cbase(reg_ep);
5629 case VM_SPECIAL_OBJECT_CONST_BASE:
5630 return vm_get_const_base(reg_ep);
5631 default:
5632 rb_bug("putspecialobject insn: unknown value_type %d", type);
5633 }
5634}
5635
5636// The ZJIT implementation uses the C function
5637// and needs to call a non-static function
5638VALUE
5639rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5640{
5641 return vm_get_special_object(reg_ep, type);
5642}
5643
5644static VALUE
5645vm_concat_array(VALUE ary1, VALUE ary2st)
5646{
5647 const VALUE ary2 = ary2st;
5648 VALUE tmp1 = rb_check_to_array(ary1);
5649 VALUE tmp2 = rb_check_to_array(ary2);
5650
5651 if (NIL_P(tmp1)) {
5652 tmp1 = rb_ary_new3(1, ary1);
5653 }
5654 if (tmp1 == ary1) {
5655 tmp1 = rb_ary_dup(ary1);
5656 }
5657
5658 if (NIL_P(tmp2)) {
5659 return rb_ary_push(tmp1, ary2);
5660 }
5661 else {
5662 return rb_ary_concat(tmp1, tmp2);
5663 }
5664}
5665
5666static VALUE
5667vm_concat_to_array(VALUE ary1, VALUE ary2st)
5668{
5669 /* ary1 must be a newly created array */
5670 const VALUE ary2 = ary2st;
5671
5672 if (NIL_P(ary2)) return ary1;
5673
5674 VALUE tmp2 = rb_check_to_array(ary2);
5675
5676 if (NIL_P(tmp2)) {
5677 return rb_ary_push(ary1, ary2);
5678 }
5679 else {
5680 return rb_ary_concat(ary1, tmp2);
5681 }
5682}
5683
5684// The YJIT implementation uses the C function
5685// and needs to call a non-static function
5686VALUE
5687rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5688{
5689 return vm_concat_array(ary1, ary2st);
5690}
5691
5692VALUE
5693rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5694{
5695 return vm_concat_to_array(ary1, ary2st);
5696}
5697
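/* splatarray insn: implements `*ary` in expressions. `flag` is true when
 * the caller may mutate the result, so a fresh copy is required; when it
 * is false an existing Array may be returned as-is. `nil` splats to an
 * empty array, and non-Array values are wrapped in a one-element array. */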
5698static VALUE
5699vm_splat_array(VALUE flag, VALUE ary)
5700{
5701 if (NIL_P(ary)) {
5702 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5703 }
5704 VALUE tmp = rb_check_to_array(ary);
5705 if (NIL_P(tmp)) {
5706 return rb_ary_new3(1, ary);
5707 }
5708 else if (RTEST(flag)) {
5709 return rb_ary_dup(tmp);
5710 }
5711 else {
5712 return tmp;
5713 }
5714}
5715
5716// The YJIT implementation uses the C function
5717// and needs to call a non-static function
5718VALUE
5719rb_vm_splat_array(VALUE flag, VALUE ary)
5720{
5721 return vm_splat_array(flag, ary);
5722}
5723
5724static VALUE
5725vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5726{
5727 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5728
5729 if (flag & VM_CHECKMATCH_ARRAY) {
5730 long i;
5731 const long n = RARRAY_LEN(pattern);
5732
5733 for (i = 0; i < n; i++) {
5734 VALUE v = RARRAY_AREF(pattern, i);
5735 VALUE c = check_match(ec, v, target, type);
5736
5737 if (RTEST(c)) {
5738 return c;
5739 }
5740 }
5741 return Qfalse;
5742 }
5743 else {
5744 return check_match(ec, pattern, target, type);
5745 }
5746}
5747
5748VALUE
5749rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5750{
5751 return vm_check_match(ec, target, pattern, flag);
5752}
5753
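/* checkkeyword insn: returns Qtrue when the keyword argument at `idx` WAS
 * supplied by the caller, letting the branch that skips its default
 * expression be taken. kw_bits records the *unspecified* keywords: a
 * Fixnum bitmap for up to KW_SPECIFIED_BITS_MAX keywords, or a Hash of
 * indices beyond that. */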
5754static VALUE
5755vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5756{
5757 const VALUE kw_bits = *(ep - bits);
5758
5759 if (FIXNUM_P(kw_bits)) {
5760 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5761 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5762 return Qfalse;
5763 }
5764 else {
5765 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5766 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5767 }
5768 return Qtrue;
5769}
5770
5771static void
5772vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5773{
5774 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5775 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5776 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5777 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5778
5779 switch (flag) {
5780 case RUBY_EVENT_CALL:
5781 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5782 return;
5783 case RUBY_EVENT_C_CALL:
5784 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5785 return;
5786 case RUBY_EVENT_RETURN:
5787 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5788 return;
5789          case RUBY_EVENT_C_RETURN:
5790            RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5791 return;
5792 }
5793 }
5794}
5795
5796static VALUE
5797vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5798{
5799 if (!rb_const_defined_at(cbase, id)) {
5800 return 0;
5801 }
5802 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5803 return rb_public_const_get_at(cbase, id);
5804 }
5805 else {
5806 return rb_const_get_at(cbase, id);
5807 }
5808}
5809
5810static VALUE
5811vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5812{
5813 if (!RB_TYPE_P(klass, T_CLASS)) {
5814 return 0;
5815 }
5816 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5817 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5818
5819 if (tmp != super) {
5820 rb_raise(rb_eTypeError,
5821 "superclass mismatch for class %"PRIsVALUE"",
5822 rb_id2str(id));
5823 }
5824 else {
5825 return klass;
5826 }
5827 }
5828 else {
5829 return klass;
5830 }
5831}
5832
5833static VALUE
5834vm_check_if_module(ID id, VALUE mod)
5835{
5836 if (!RB_TYPE_P(mod, T_MODULE)) {
5837 return 0;
5838 }
5839 else {
5840 return mod;
5841 }
5842}
5843
5844static VALUE
5845declare_under(ID id, VALUE cbase, VALUE c)
5846{
5847 rb_set_class_path_string(c, cbase, rb_id2str(id));
5848 rb_const_set(cbase, id, c);
5849 return c;
5850}
5851
5852static VALUE
5853vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5854{
5855 /* new class declaration */
5856 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5857 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5858    rb_define_alloc_func(c, rb_get_alloc_func(c));
5859    rb_class_inherited(s, c);
5860 return c;
5861}
5862
5863static VALUE
5864vm_declare_module(ID id, VALUE cbase)
5865{
5866 /* new module declaration */
5867 return declare_under(id, cbase, rb_module_new());
5868}
5869
5870NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5871static void
5872unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5873{
5874 VALUE name = rb_id2str(id);
5875 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5876 name, type);
5877 VALUE location = rb_const_source_location_at(cbase, id);
5878 if (!NIL_P(location)) {
5879 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5880 " previous definition of %"PRIsVALUE" was here",
5881 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5882 }
5883    rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5884}
5885
5886static VALUE
5887vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5888{
5889 VALUE klass;
5890
5891 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5892 rb_raise(rb_eTypeError,
5893 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5894 rb_obj_class(super));
5895 }
5896
5897 vm_check_if_namespace(cbase);
5898
5899 /* find klass */
5900 rb_autoload_load(cbase, id);
5901
5902 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5903 if (!vm_check_if_class(id, flags, super, klass))
5904 unmatched_redefinition("class", cbase, id, klass);
5905 return klass;
5906 }
5907 else {
5908 return vm_declare_class(id, flags, cbase, super);
5909 }
5910}
5911
5912static VALUE
5913vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5914{
5915 VALUE mod;
5916
5917 vm_check_if_namespace(cbase);
5918 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5919 if (!vm_check_if_module(id, mod))
5920 unmatched_redefinition("module", cbase, id, mod);
5921 return mod;
5922 }
5923 else {
5924 return vm_declare_module(id, cbase);
5925 }
5926}
5927
5928static VALUE
5929vm_find_or_create_class_by_id(ID id,
5930 rb_num_t flags,
5931 VALUE cbase,
5932 VALUE super)
5933{
5934 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5935
5936 switch (type) {
5937 case VM_DEFINECLASS_TYPE_CLASS:
5938 /* classdef returns class scope value */
5939 return vm_define_class(id, flags, cbase, super);
5940
5941 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5942 /* classdef returns class scope value */
5943 return rb_singleton_class(cbase);
5944
5945 case VM_DEFINECLASS_TYPE_MODULE:
5946 /* classdef returns class scope value */
5947 return vm_define_module(id, flags, cbase);
5948
5949 default:
5950 rb_bug("unknown defineclass type: %d", (int)type);
5951 }
5952}
5953
5954static rb_method_visibility_t
5955vm_scope_visibility_get(const rb_execution_context_t *ec)
5956{
5957 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5958
5959 if (!vm_env_cref_by_cref(cfp->ep)) {
5960 return METHOD_VISI_PUBLIC;
5961 }
5962 else {
5963 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5964 }
5965}
5966
5967static int
5968vm_scope_module_func_check(const rb_execution_context_t *ec)
5969{
5970 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5971
5972 if (!vm_env_cref_by_cref(cfp->ep)) {
5973 return FALSE;
5974 }
5975 else {
5976 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5977 }
5978}
5979
5980static void
5981vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5982{
5983 VALUE klass;
5984 rb_method_visibility_t visi;
5985 rb_cref_t *cref = vm_ec_cref(ec);
5986
5987 if (is_singleton) {
5988 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5989 visi = METHOD_VISI_PUBLIC;
5990 }
5991 else {
5992 klass = CREF_CLASS_FOR_DEFINITION(cref);
5993 visi = vm_scope_visibility_get(ec);
5994 }
5995
5996 if (NIL_P(klass)) {
5997 rb_raise(rb_eTypeError, "no class/module to add method");
5998 }
5999
6000 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
6001 // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
6002 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
6003 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
6004 }
6005
6006 if (!is_singleton && vm_scope_module_func_check(ec)) {
6007 klass = rb_singleton_class(klass);
6008 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
6009 }
6010}
6011
6012static VALUE
6013vm_invokeblock_i(struct rb_execution_context_struct *ec,
6014 struct rb_control_frame_struct *reg_cfp,
6015 struct rb_calling_info *calling)
6016{
6017 const struct rb_callinfo *ci = calling->cd->ci;
6018 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
6019
6020 if (block_handler == VM_BLOCK_HANDLER_NONE) {
6021 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
6022 }
6023 else {
6024 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
6025 }
6026}
6027
6028enum method_explorer_type {
6029 mexp_search_method,
6030 mexp_search_invokeblock,
6031 mexp_search_super,
6032};
6033
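/* Common tail of the send-ish instructions (send, opt_send_without_block,
 * invokesuper, invokeblock): reads the receiver from the stack at
 * TOPN(argc), resolves the callee according to `method_explorer`, and
 * invokes it through the (cached) call cache. */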
6034static inline VALUE
6035vm_sendish(
6036 struct rb_execution_context_struct *ec,
6037 struct rb_control_frame_struct *reg_cfp,
6038 struct rb_call_data *cd,
6039 VALUE block_handler,
6040 enum method_explorer_type method_explorer
6041) {
6042 VALUE val = Qundef;
6043 const struct rb_callinfo *ci = cd->ci;
6044 const struct rb_callcache *cc;
6045 int argc = vm_ci_argc(ci);
6046 VALUE recv = TOPN(argc);
6047 struct rb_calling_info calling = {
6048 .block_handler = block_handler,
6049 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6050 .recv = recv,
6051 .argc = argc,
6052 .cd = cd,
6053 };
6054
6055 switch (method_explorer) {
6056 case mexp_search_method:
6057 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6058 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6059 break;
6060 case mexp_search_super:
6061 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6062 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6063 break;
6064 case mexp_search_invokeblock:
6065 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6066 break;
6067 }
6068 return val;
6069}
6070
6071VALUE
6072rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6073{
6074 stack_check(ec);
6075
6076 struct rb_forwarding_call_data adjusted_cd;
6077 struct rb_callinfo adjusted_ci;
6078
6079 VALUE bh;
6080 VALUE val;
6081
6082 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6083 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6084
6085 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6086
6087 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6088 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6089 }
6090 }
6091 else {
6092 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6093 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6094 }
6095
6096 VM_EXEC(ec, val);
6097 return val;
6098}
6099
6100VALUE
6101rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6102{
6103 stack_check(ec);
6104 VALUE bh = VM_BLOCK_HANDLER_NONE;
6105 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6106 VM_EXEC(ec, val);
6107 return val;
6108}
6109
6110VALUE
6111rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6112{
6113 stack_check(ec);
6114 struct rb_forwarding_call_data adjusted_cd;
6115 struct rb_callinfo adjusted_ci;
6116
6117 VALUE bh;
6118 VALUE val;
6119
6120 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6121 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6122
6123 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6124
6125 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6126 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6127 }
6128 }
6129 else {
6130 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6131 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6132 }
6133
6134 VM_EXEC(ec, val);
6135 return val;
6136}
6137
6138VALUE
6139rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6140{
6141 stack_check(ec);
6142 VALUE bh = VM_BLOCK_HANDLER_NONE;
6143 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6144 VM_EXEC(ec, val);
6145 return val;
6146}
6147
6148/* object.c */
6149VALUE rb_nil_to_s(VALUE);
6150VALUE rb_true_to_s(VALUE);
6151VALUE rb_false_to_s(VALUE);
6152/* numeric.c */
6153VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6154VALUE rb_fix_to_s(VALUE);
6155/* variable.c */
6156VALUE rb_mod_to_s(VALUE);
6158
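/* objtostring insn: fast path for string interpolation ("#{recv}").
 * Strings are returned as-is; for a handful of core types whose #to_s is
 * known not to be redefined the result is computed directly. Qundef means
 * "fall back to a real #to_s dispatch". */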
6159static VALUE
6160vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6161{
6162 int type = TYPE(recv);
6163 if (type == T_STRING) {
6164 return recv;
6165 }
6166
6167 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6168
6169 switch (type) {
6170 case T_SYMBOL:
6171 if (check_method_basic_definition(vm_cc_cme(cc))) {
6172 // rb_sym_to_s() allocates a mutable string, but since we are only
6173 // going to use this string for interpolation, it's fine to use the
6174 // frozen string.
6175 return rb_sym2str(recv);
6176 }
6177 break;
6178 case T_MODULE:
6179 case T_CLASS:
6180 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6181 // rb_mod_to_s() allocates a mutable string, but since we are only
6182 // going to use this string for interpolation, it's fine to use the
6183 // frozen string.
6184 VALUE val = rb_mod_name(recv);
6185 if (NIL_P(val)) {
6186 val = rb_mod_to_s(recv);
6187 }
6188 return val;
6189 }
6190 break;
6191 case T_NIL:
6192 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6193 return rb_nil_to_s(recv);
6194 }
6195 break;
6196 case T_TRUE:
6197 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6198 return rb_true_to_s(recv);
6199 }
6200 break;
6201 case T_FALSE:
6202 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6203 return rb_false_to_s(recv);
6204 }
6205 break;
6206 case T_FIXNUM:
6207 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6208 return rb_fix_to_s(recv);
6209 }
6210 break;
6211 }
6212 return Qundef;
6213}
6214
6215static VALUE
6216vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6217{
6218 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6219 return ary;
6220 }
6221 else {
6222 return Qundef;
6223 }
6224}
6225
6226static VALUE
6227vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6228{
6229 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6230 return hash;
6231 }
6232 else {
6233 return Qundef;
6234 }
6235}
6236
6237static VALUE
6238vm_opt_str_freeze(VALUE str, int bop, ID id)
6239{
6240 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6241 return str;
6242 }
6243 else {
6244 return Qundef;
6245 }
6246}
6247
6248/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6249#define id_cmp idCmp
6250
6251static VALUE
6252vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6253{
6254 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6255 return rb_ary_includes(ary, target);
6256 }
6257 else {
6258 VALUE args[1] = {target};
6259
6260 // duparray
6261 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6262 VALUE dupary = rb_ary_resurrect(ary);
6263
6264 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6265 }
6266}
6267
6268VALUE
6269rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6270{
6271 return vm_opt_duparray_include_p(ec, ary, target);
6272}
6273
6274static VALUE
6275vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6276{
6277 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6278 if (num == 0) {
6279 return Qnil;
6280 }
6281 else {
6282 VALUE result = *ptr;
6283 rb_snum_t i = num - 1;
6284 while (i-- > 0) {
6285 const VALUE v = *++ptr;
6286 if (OPTIMIZED_CMP(v, result) > 0) {
6287 result = v;
6288 }
6289 }
6290 return result;
6291 }
6292 }
6293 else {
6294 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6295 }
6296}
6297
6298VALUE
6299rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6300{
6301 return vm_opt_newarray_max(ec, num, ptr);
6302}
6303
6304static VALUE
6305vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6306{
6307 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6308 if (num == 0) {
6309 return Qnil;
6310 }
6311 else {
6312 VALUE result = *ptr;
6313 rb_snum_t i = num - 1;
6314 while (i-- > 0) {
6315 const VALUE v = *++ptr;
6316 if (OPTIMIZED_CMP(v, result) < 0) {
6317 result = v;
6318 }
6319 }
6320 return result;
6321 }
6322 }
6323 else {
6324 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6325 }
6326}
6327
6328VALUE
6329rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6330{
6331 return vm_opt_newarray_min(ec, num, ptr);
6332}
6333
6334static VALUE
6335vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6336{
6337 // If Array#hash is _not_ monkeypatched, use the optimized call
6338 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6339 return rb_ary_hash_values(num, ptr);
6340 }
6341 else {
6342 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6343 }
6344}
6345
6346VALUE
6347rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6348{
6349 return vm_opt_newarray_hash(ec, num, ptr);
6350}
6351
6352VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6353VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6354
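/* rb_setup_fake_ary builds a temporary RArray header on the C stack that
 * aliases the `num` VALUEs at `ptr`, letting the Array fast paths below
 * (include?, pack) run without allocating a heap Array. The fake array
 * must never escape the current frame. */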
6355static VALUE
6356vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6357{
6358 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6359 struct RArray fake_ary;
6360 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6361 return rb_ary_includes(ary, target);
6362 }
6363 else {
6364 VALUE args[1] = {target};
6365 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6366 }
6367}
6368
6369VALUE
6370rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6371{
6372 return vm_opt_newarray_include_p(ec, num, ptr, target);
6373}
6374
6375static VALUE
6376vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6377{
6378 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6379 struct RArray fake_ary;
6380 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6381 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6382 }
6383 else {
6384        // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6385        // Set up an argv array with room for the keyword hash.
6386 VALUE args[2];
6387 args[0] = fmt;
6388 int kw_splat = RB_NO_KEYWORDS;
6389 int argc = 1;
6390
6391 if (!UNDEF_P(buffer)) {
6392 args[1] = rb_hash_new_with_size(1);
6393 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6394 kw_splat = RB_PASS_KEYWORDS;
6395 argc++;
6396 }
6397
6398 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6399 }
6400}
6401
6402VALUE
6403rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6404{
6405 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6406}
6407
6408VALUE
6409rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6410{
6411 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6412}
6413
6414#undef id_cmp
6415
6416static void
6417vm_track_constant_cache(ID id, void *ic)
6418{
6419 rb_vm_t *vm = GET_VM();
6420 struct rb_id_table *const_cache = vm->constant_cache;
6421 VALUE lookup_result;
6422 set_table *ics;
6423
6424 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6425 ics = (set_table *)lookup_result;
6426 }
6427 else {
6428 ics = set_init_numtable();
6429 rb_id_table_insert(const_cache, id, (VALUE)ics);
6430 }
6431
6432    /* The call below to set_insert could allocate, which could trigger a GC.
6433     * If it triggers a GC, it may free an iseq that also holds a cache to this
6434     * constant. If that iseq is the last iseq with a cache to this constant, then
6435     * it will free this set table, which would cause a use-after-free during this
6436     * set_insert.
6437     *
6438     * So to fix this issue, we store the ID that is currently being inserted
6439     * and, in remove_from_constant_cache, we don't free the set table for the ID
6440     * equal to this one.
6441 *
6442 * See [Bug #20921].
6443 */
6444 vm->inserting_constant_cache_id = id;
6445
6446 set_insert(ics, (st_data_t)ic);
6447
6448 vm->inserting_constant_cache_id = (ID)0;
6449}
6450
6451static void
6452vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6453{
6454 RB_VM_LOCKING() {
6455 for (int i = 0; segments[i]; i++) {
6456 ID id = segments[i];
6457 if (id == idNULL) continue;
6458 vm_track_constant_cache(id, ic);
6459 }
6460 }
6461}
6462
6463// For JIT inlining
6464static inline bool
6465vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6466{
6467 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6468 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6469
6470 return (ic_cref == NULL || // no need to check CREF
6471 ic_cref == vm_get_cref(reg_ep));
6472 }
6473 return false;
6474}
6475
6476static bool
6477vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6478{
6479 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6480 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6481}
6482
6483// YJIT needs this function to never allocate and never raise
6484bool
6485rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6486{
6487 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6488}
6489
6490static void
6491vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6492{
6493 if (ruby_vm_const_missing_count > 0) {
6494 ruby_vm_const_missing_count = 0;
6495 ic->entry = NULL;
6496 return;
6497 }
6498
6499 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6500 RB_OBJ_WRITE(ice, &ice->value, val);
6501 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6502 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6503 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6504
6505 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6506 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6507 rb_yjit_constant_ic_update(iseq, ic, pos);
6508}
6509
6510VALUE
6511rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6512{
6513 VALUE val;
6514 const ID *segments = ic->segments;
6515 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6516 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6517 val = ice->value;
6518
6519 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6520 }
6521 else {
6522 ruby_vm_constant_cache_misses++;
6523 val = vm_get_ev_const_chain(ec, segments);
6524 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6525        // Undo the PC increment to get the address of this instruction
6526        // INSN_ATTR(width) == 2
6527 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6528 }
6529 return val;
6530}
6531
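/* once insn (used e.g. for `/#{expr}/o` regexps): runs the body exactly
 * one time per iseq. The running thread is recorded so that concurrent
 * callers spin, checking interrupts, until the first run finishes, while
 * a re-entrant call on the same thread simply executes the body again. */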
6532static VALUE
6533vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6534{
6535 rb_thread_t *th = rb_ec_thread_ptr(ec);
6536 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6537
6538 again:
6539 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6540 return is->once.value;
6541 }
6542 else if (is->once.running_thread == NULL) {
6543 VALUE val;
6544 is->once.running_thread = th;
6545 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6546 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6547 /* is->once.running_thread is cleared by vm_once_clear() */
6548 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6549 return val;
6550 }
6551 else if (is->once.running_thread == th) {
6552 /* recursive once */
6553 return vm_once_exec((VALUE)iseq);
6554 }
6555 else {
6556 /* waiting for finish */
6557 RUBY_VM_CHECK_INTS(ec);
6558        rb_thread_schedule();
6559        goto again;
6560 }
6561}
6562
6563static OFFSET
6564vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6565{
6566 switch (OBJ_BUILTIN_TYPE(key)) {
6567 case -1:
6568 case T_FLOAT:
6569 case T_SYMBOL:
6570 case T_BIGNUM:
6571 case T_STRING:
6572 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6573 SYMBOL_REDEFINED_OP_FLAG |
6574 INTEGER_REDEFINED_OP_FLAG |
6575 FLOAT_REDEFINED_OP_FLAG |
6576 NIL_REDEFINED_OP_FLAG |
6577 TRUE_REDEFINED_OP_FLAG |
6578 FALSE_REDEFINED_OP_FLAG |
6579 STRING_REDEFINED_OP_FLAG)) {
6580 st_data_t val;
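            /* Normalize integral Float keys to Integer so that e.g.
             * `case 1.0 ... when 1` hits the same CDHASH entry:
             * 1.0 == 1, but Float#hash and Integer#hash differ. */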
6581 if (RB_FLOAT_TYPE_P(key)) {
6582 double kval = RFLOAT_VALUE(key);
6583 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6584 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6585 }
6586 }
6587 if (rb_hash_stlike_lookup(hash, key, &val)) {
6588 return FIX2LONG((VALUE)val);
6589 }
6590 else {
6591 return else_offset;
6592 }
6593 }
6594 }
6595 return 0;
6596}
6597
6598NORETURN(static void
6599 vm_stack_consistency_error(const rb_execution_context_t *ec,
6600 const rb_control_frame_t *,
6601 const VALUE *));
6602static void
6603vm_stack_consistency_error(const rb_execution_context_t *ec,
6604 const rb_control_frame_t *cfp,
6605 const VALUE *bp)
6606{
6607 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6608 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6609 static const char stack_consistency_error[] =
6610 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6611#if defined RUBY_DEVEL
6612 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6613 rb_str_cat_cstr(mesg, "\n");
6614 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6615    rb_bug("%s", RSTRING_PTR(mesg));
6616#else
6617 rb_bug(stack_consistency_error, nsp, nbp);
6618#endif
6619}
6620
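/* The vm_opt_* binary helpers below share one shape: try the Fixnum fast
 * path, then flonum, then heap Float (plus String/Array where applicable),
 * each guarded by BASIC_OP_UNREDEFINED_P; returning Qundef tells the
 * caller to fall back to a normal method dispatch. */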
6621static VALUE
6622vm_opt_plus(VALUE recv, VALUE obj)
6623{
6624 if (FIXNUM_2_P(recv, obj) &&
6625 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6626 return rb_fix_plus_fix(recv, obj);
6627 }
6628 else if (FLONUM_2_P(recv, obj) &&
6629 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6630 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6631 }
6632 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6633 return Qundef;
6634 }
6635 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6636 RBASIC_CLASS(obj) == rb_cFloat &&
6637 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6638 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6639 }
6640 else if (RBASIC_CLASS(recv) == rb_cString &&
6641 RBASIC_CLASS(obj) == rb_cString &&
6642 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6643 return rb_str_opt_plus(recv, obj);
6644 }
6645 else if (RBASIC_CLASS(recv) == rb_cArray &&
6646 RBASIC_CLASS(obj) == rb_cArray &&
6647 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6648 return rb_ary_plus(recv, obj);
6649 }
6650 else {
6651 return Qundef;
6652 }
6653}
6654
6655static VALUE
6656vm_opt_minus(VALUE recv, VALUE obj)
6657{
6658 if (FIXNUM_2_P(recv, obj) &&
6659 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6660 return rb_fix_minus_fix(recv, obj);
6661 }
6662 else if (FLONUM_2_P(recv, obj) &&
6663 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6664 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6665 }
6666 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6667 return Qundef;
6668 }
6669 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6670 RBASIC_CLASS(obj) == rb_cFloat &&
6671 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6672 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6673 }
6674 else {
6675 return Qundef;
6676 }
6677}
6678
6679static VALUE
6680vm_opt_mult(VALUE recv, VALUE obj)
6681{
6682 if (FIXNUM_2_P(recv, obj) &&
6683 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6684 return rb_fix_mul_fix(recv, obj);
6685 }
6686 else if (FLONUM_2_P(recv, obj) &&
6687 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6688 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6689 }
6690 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6691 return Qundef;
6692 }
6693 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6694 RBASIC_CLASS(obj) == rb_cFloat &&
6695 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6696 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6697 }
6698 else {
6699 return Qundef;
6700 }
6701}
6702
6703static VALUE
6704vm_opt_div(VALUE recv, VALUE obj)
6705{
6706 if (FIXNUM_2_P(recv, obj) &&
6707 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6708 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6709 }
6710 else if (FLONUM_2_P(recv, obj) &&
6711 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6712 return rb_flo_div_flo(recv, obj);
6713 }
6714 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6715 return Qundef;
6716 }
6717 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6718 RBASIC_CLASS(obj) == rb_cFloat &&
6719 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6720 return rb_flo_div_flo(recv, obj);
6721 }
6722 else {
6723 return Qundef;
6724 }
6725}
6726
6727static VALUE
6728vm_opt_mod(VALUE recv, VALUE obj)
6729{
6730 if (FIXNUM_2_P(recv, obj) &&
6731 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6732 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6733 }
6734 else if (FLONUM_2_P(recv, obj) &&
6735 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6736 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6737 }
6738 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6739 return Qundef;
6740 }
6741 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6742 RBASIC_CLASS(obj) == rb_cFloat &&
6743 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6744 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6745 }
6746 else {
6747 return Qundef;
6748 }
6749}
6750
6751static VALUE
6752vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6753{
6754 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6755 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6756
6757 if (!UNDEF_P(val)) {
6758 return RBOOL(!RTEST(val));
6759 }
6760 }
6761
6762 return Qundef;
6763}
6764
6765static VALUE
6766vm_opt_lt(VALUE recv, VALUE obj)
6767{
6768 if (FIXNUM_2_P(recv, obj) &&
6769 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6770 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6771 }
6772 else if (FLONUM_2_P(recv, obj) &&
6773 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6774 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6775 }
6776 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6777 return Qundef;
6778 }
6779 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6780 RBASIC_CLASS(obj) == rb_cFloat &&
6781 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6782 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6783 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6784 }
6785 else {
6786 return Qundef;
6787 }
6788}
6789
6790static VALUE
6791vm_opt_le(VALUE recv, VALUE obj)
6792{
6793 if (FIXNUM_2_P(recv, obj) &&
6794 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6795 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6796 }
6797 else if (FLONUM_2_P(recv, obj) &&
6798 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6799 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6800 }
6801 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6802 return Qundef;
6803 }
6804 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6805 RBASIC_CLASS(obj) == rb_cFloat &&
6806 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6807 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6808 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6809 }
6810 else {
6811 return Qundef;
6812 }
6813}
6814
6815static VALUE
6816vm_opt_gt(VALUE recv, VALUE obj)
6817{
6818 if (FIXNUM_2_P(recv, obj) &&
6819 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6820 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6821 }
6822 else if (FLONUM_2_P(recv, obj) &&
6823 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6824 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6825 }
6826 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6827 return Qundef;
6828 }
6829 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6830 RBASIC_CLASS(obj) == rb_cFloat &&
6831 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6832 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6833 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6834 }
6835 else {
6836 return Qundef;
6837 }
6838}
6839
6840static VALUE
6841vm_opt_ge(VALUE recv, VALUE obj)
6842{
6843 if (FIXNUM_2_P(recv, obj) &&
6844 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6845 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6846 }
6847 else if (FLONUM_2_P(recv, obj) &&
6848 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6849 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6850 }
6851 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6852 return Qundef;
6853 }
6854 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6855 RBASIC_CLASS(obj) == rb_cFloat &&
6856 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6857 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6858 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6859 }
6860 else {
6861 return Qundef;
6862 }
6863}
6864
6865
6866static VALUE
6867vm_opt_ltlt(VALUE recv, VALUE obj)
6868{
6869 if (SPECIAL_CONST_P(recv)) {
6870 return Qundef;
6871 }
6872 else if (RBASIC_CLASS(recv) == rb_cString &&
6873 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6874 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6875 return rb_str_buf_append(recv, obj);
6876 }
6877 else {
6878 return rb_str_concat(recv, obj);
6879 }
6880 }
6881 else if (RBASIC_CLASS(recv) == rb_cArray &&
6882 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6883 return rb_ary_push(recv, obj);
6884 }
6885 else {
6886 return Qundef;
6887 }
6888}
6889
6890static VALUE
6891vm_opt_and(VALUE recv, VALUE obj)
6892{
6893 // If recv and obj are both fixnums, then the bottom tag bit
6894 // will be 1 on both. 1 & 1 == 1, so the result value will also
6895 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6896 // will be 0, and we return Qundef.
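    // Worked example: 3 & 5. INT2FIX(3) == 0b0111 and INT2FIX(5) == 0b1011;
    // 0b0111 & 0b1011 == 0b0011 == INT2FIX(1), matching 3 & 5 == 1, because
    // (2a+1) & (2b+1) == 2(a&b)+1.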
6897 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6898
6899 if (FIXNUM_P(ret) &&
6900 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6901 return ret;
6902 }
6903 else {
6904 return Qundef;
6905 }
6906}
6907
6908static VALUE
6909vm_opt_or(VALUE recv, VALUE obj)
6910{
6911 if (FIXNUM_2_P(recv, obj) &&
6912 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6913 return recv | obj;
6914 }
6915 else {
6916 return Qundef;
6917 }
6918}
6919
6920static VALUE
6921vm_opt_aref(VALUE recv, VALUE obj)
6922{
6923 if (SPECIAL_CONST_P(recv)) {
6924 if (FIXNUM_2_P(recv, obj) &&
6925 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6926 return rb_fix_aref(recv, obj);
6927 }
6928 return Qundef;
6929 }
6930 else if (RBASIC_CLASS(recv) == rb_cArray &&
6931 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6932 if (FIXNUM_P(obj)) {
6933 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6934 }
6935 else {
6936 return rb_ary_aref1(recv, obj);
6937 }
6938 }
6939 else if (RBASIC_CLASS(recv) == rb_cHash &&
6940 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6941 return rb_hash_aref(recv, obj);
6942 }
6943 else {
6944 return Qundef;
6945 }
6946}
6947
6948static VALUE
6949vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6950{
6951 if (SPECIAL_CONST_P(recv)) {
6952 return Qundef;
6953 }
6954 else if (RBASIC_CLASS(recv) == rb_cArray &&
6955 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6956 FIXNUM_P(obj)) {
6957 rb_ary_store(recv, FIX2LONG(obj), set);
6958 return set;
6959 }
6960 else if (RBASIC_CLASS(recv) == rb_cHash &&
6961 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6962 rb_hash_aset(recv, obj, set);
6963 return set;
6964 }
6965 else {
6966 return Qundef;
6967 }
6968}
6969
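/* opt_aref_with insn: `hash["literal"]` with a string literal key. Avoids
 * allocating the key string, but only when Hash#[] is intact, the hash
 * does not compare by identity, and it has no default proc (which would
 * otherwise observe the shared, non-duplicated key object). */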
6970static VALUE
6971vm_opt_aref_with(VALUE recv, VALUE key)
6972{
6973 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6974 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6975 rb_hash_compare_by_id_p(recv) == Qfalse &&
6976 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6977 return rb_hash_aref(recv, key);
6978 }
6979 else {
6980 return Qundef;
6981 }
6982}
6983
6984VALUE
6985rb_vm_opt_aref_with(VALUE recv, VALUE key)
6986{
6987 return vm_opt_aref_with(recv, key);
6988}
6989
6990static VALUE
6991vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6992{
6993 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6994 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6995 rb_hash_compare_by_id_p(recv) == Qfalse) {
6996 return rb_hash_aset(recv, key, val);
6997 }
6998 else {
6999 return Qundef;
7000 }
7001}
7002
7003VALUE
7004rb_vm_opt_aset_with(VALUE recv, VALUE key, VALUE value)
7005{
7006 return vm_opt_aset_with(recv, key, value);
7007}
7008
7009static VALUE
7010vm_opt_length(VALUE recv, int bop)
7011{
7012 if (SPECIAL_CONST_P(recv)) {
7013 return Qundef;
7014 }
7015 else if (RBASIC_CLASS(recv) == rb_cString &&
7016 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7017 if (bop == BOP_EMPTY_P) {
7018 return LONG2NUM(RSTRING_LEN(recv));
7019 }
7020 else {
7021 return rb_str_length(recv);
7022 }
7023 }
7024 else if (RBASIC_CLASS(recv) == rb_cArray &&
7025 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7026 return LONG2NUM(RARRAY_LEN(recv));
7027 }
7028 else if (RBASIC_CLASS(recv) == rb_cHash &&
7029 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7030 return INT2FIX(RHASH_SIZE(recv));
7031 }
7032 else {
7033 return Qundef;
7034 }
7035}
7036
7037static VALUE
7038vm_opt_empty_p(VALUE recv)
7039{
7040 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7041 case Qundef: return Qundef;
7042 case INT2FIX(0): return Qtrue;
7043 default: return Qfalse;
7044 }
7045}
7046
7047VALUE rb_false(VALUE obj);
7048
7049static VALUE
7050vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7051{
7052 if (NIL_P(recv) &&
7053 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7054 return Qtrue;
7055 }
7056 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
7057 return Qfalse;
7058 }
7059 else {
7060 return Qundef;
7061 }
7062}
7063
7064static VALUE
7065fix_succ(VALUE x)
7066{
7067 switch (x) {
7068 case ~0UL:
7069 /* 0xFFFF_FFFF == INT2FIX(-1)
7070 * `-1.succ` is of course 0. */
7071 return INT2FIX(0);
7072 case RSHIFT(~0UL, 1):
7073 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7074 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7075 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7076 default:
7077 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7078 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7079 * == lx*2 + ly*2 + 1
7080 * == (lx*2+1) + (ly*2+1) - 1
7081 * == x + y - 1
7082 *
7083 * Here, if we put y := INT2FIX(1):
7084 *
7085 * == x + INT2FIX(1) - 1
7086 * == x + 2 .
7087 */
7088 return x + 2;
7089 }
7090}
7091
7092static VALUE
7093vm_opt_succ(VALUE recv)
7094{
7095 if (FIXNUM_P(recv) &&
7096 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7097 return fix_succ(recv);
7098 }
7099 else if (SPECIAL_CONST_P(recv)) {
7100 return Qundef;
7101 }
7102 else if (RBASIC_CLASS(recv) == rb_cString &&
7103 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7104 return rb_str_succ(recv);
7105 }
7106 else {
7107 return Qundef;
7108 }
7109}
7110
7111static VALUE
7112vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7113{
7114 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7115 return RBOOL(!RTEST(recv));
7116 }
7117 else {
7118 return Qundef;
7119 }
7120}
7121
7122static VALUE
7123vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7124{
7125 if (SPECIAL_CONST_P(recv)) {
7126 return Qundef;
7127 }
7128 else if (RBASIC_CLASS(recv) == rb_cString &&
7129 CLASS_OF(obj) == rb_cRegexp &&
7130 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7131 return rb_reg_match(obj, recv);
7132 }
7133 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7134 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7135 return rb_reg_match(recv, obj);
7136 }
7137 else {
7138 return Qundef;
7139 }
7140}
7141
7142rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7143
7144NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7145
7146static inline void
7147vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7148 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7149 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7150{
7151 rb_event_flag_t event = pc_events & target_event;
7152 VALUE self = GET_SELF();
7153
7154 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7155
7156 if (event & global_hooks->events) {
7157 /* increment PC because source line is calculated with PC-1 */
7158 reg_cfp->pc++;
7159 vm_dtrace(event, ec);
7160        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
7161 reg_cfp->pc--;
7162 }
7163
7164 // Load here since global hook above can add and free local hooks
7165 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7166 if (local_hooks != NULL) {
7167 if (event & local_hooks->events) {
7168 /* increment PC because source line is calculated with PC-1 */
7169 reg_cfp->pc++;
7170            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
7171 reg_cfp->pc--;
7172 }
7173 }
7174}
7175
7176#define VM_TRACE_HOOK(target_event, val) do { \
7177 if ((pc_events & (target_event)) & enabled_flags) { \
7178 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7179 } \
7180} while (0)
7181
7182static VALUE
7183rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7184{
7185 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7186 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7187 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7188}
7189
7190static void
7191vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7192{
7193 const VALUE *pc = reg_cfp->pc;
7194 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7195 rb_event_flag_t global_events = enabled_flags;
7196
7197 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7198 return;
7199 }
7200 else {
7201 const rb_iseq_t *iseq = reg_cfp->iseq;
7202 VALUE iseq_val = (VALUE)iseq;
7203 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7204 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7205 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7206 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7207 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7208 rb_hook_list_t *bmethod_local_hooks = NULL;
7209 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7210 rb_event_flag_t bmethod_local_events = 0;
7211 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7212 enabled_flags |= iseq_local_events;
7213
7214 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7215
7216 if (bmethod_frame) {
7217 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7218 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7219 bmethod_local_hooks = me->def->body.bmethod.hooks;
7220 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7221 if (bmethod_local_hooks) {
7222 bmethod_local_events = bmethod_local_hooks->events;
7223 }
7224 }
7225
7226
7227 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7228#if 0
7229 /* disable trace */
7230 /* TODO: incomplete */
7231 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7232#else
7233 /* do not disable trace because of performance problem
7234 * (re-enable overhead)
7235 */
7236#endif
7237 return;
7238 }
7239 else if (ec->trace_arg != NULL) {
7240 /* already tracing */
7241 return;
7242 }
7243 else {
7244 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7245 /* Note, not considering iseq local events here since the same
7246 * iseq could be used in multiple bmethods. */
7247 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7248
7249 if (0) {
7250 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7251 (int)pos,
7252 (int)pc_events,
7253 RSTRING_PTR(rb_iseq_path(iseq)),
7254 (int)rb_iseq_line_no(iseq, pos),
7255 RSTRING_PTR(rb_iseq_label(iseq)));
7256 }
7257 VM_ASSERT(reg_cfp->pc == pc);
7258 VM_ASSERT(pc_events != 0);
7259
7260 /* check traces */
7261 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7262 /* b_call instruction running as a method. Fire call event. */
7263 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7264 }
7265            VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7266            VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7267 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7268 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7269 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7270 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7271 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7272 /* b_return instruction running as a method. Fire return event. */
7273 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7274 }
7275
7276 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7277 // We need the pointer to stay valid in case compaction happens in a trace hook.
7278 //
7279 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7280 // storage for `rb_method_definition_t` is not on the GC heap.
7281 RB_GC_GUARD(iseq_val);
7282 }
7283 }
7284}
7285#undef VM_TRACE_HOOK
7286
7287#if VM_CHECK_MODE > 0
7288NORETURN( NOINLINE( COLDFUNC
7289void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7290
7291void
7292Init_vm_stack_canary(void)
7293{
7294 /* This has to be called _after_ our PRNG is properly set up. */
7295 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7296 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7297
7298 vm_stack_canary_was_born = true;
7299 VM_ASSERT(n == 0);
7300}
7301
7302void
7303rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7304{
7305 /* Because a method has already been called, why not call
7306 * another one. */
7307 const char *insn = rb_insns_name(i);
7308 VALUE inspection = rb_inspect(c);
7309 const char *str = StringValueCStr(inspection);
7310
7311 rb_bug("dead canary found at %s: %s", insn, str);
7312}
7313
7314#else
7315void Init_vm_stack_canary(void) { /* nothing to do */ }
7316#endif
7317
7318
7319/* a part of the following code is generated by this ruby script:
7320
732116.times{|i|
7322 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7323 typedef_args.prepend(", ") if i != 0
7324 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7325 call_args.prepend(", ") if i != 0
7326 puts %Q{
7327static VALUE
7328builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7329{
7330 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7331 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7332}}
7333}
7334
7335puts
7336puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
733716.times{|i|
7338 puts " builtin_invoker#{i},"
7339}
7340puts "};"
7341*/

static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}
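
/* lookup_builtin_invoker() performs no bounds check: callers must
 * guarantee 0 <= argc <= 15, because the invokers[] table above has
 * exactly 16 entries. A defensive variant could assert the precondition
 * first, e.g. (a sketch, not what this file does):
 *
 *     VM_ASSERT(0 <= argc && argc <= 15);
 *     return invokers[argc];
 */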

static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}
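
/* Roughly how the canary check above works: when the calling iseq is
 * annotated with `Primitive.attr! :leaf`, SETUP_CANARY() writes a marker
 * VALUE into the stack slot at the current SP, and CHECK_CANARY()
 * re-reads that slot after the call. A genuinely leaf builtin never
 * grows the VM stack, so if the marker has been overwritten the :leaf
 * annotation was wrong, and rb_vm_canary_is_found_dead() (defined above)
 * reports it via rb_bug(), naming the offending instruction
 * (BIN(invokebuiltin) here). In builds where the canary machinery is
 * compiled out, both macros are no-ops. */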

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}
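
/* The argv computation above relies on the VM frame layout: a method's
 * locals are stored immediately below the VM_ENV_DATA_SIZE environment
 * slots that cfp->ep points into, so
 *
 *     cfp->ep - local_table_size - VM_ENV_DATA_SIZE + 1
 *
 * is the address of local #0, and adding start_index skips any leading
 * locals that should not be forwarded. The delegate thus hands the
 * frame's own parameter slots to the builtin in place, without copying
 * them into a separate argument buffer. */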

// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}
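
/* A hypothetical use, assuming C code emitted by the builtin loader for
 * an inlined `__builtin_inline!` fragment: the generator computes an
 * ep-relative slot for each local it needs and emits a call such as
 *
 *     VALUE x = rb_vm_lvar_exposed(ec, idx);  // idx chosen at build time
 *
 * (`idx` here is a placeholder.) The helper itself just exposes
 * cfp->ep[] of the current frame to that generated code. */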