Ruby 3.5.0dev (2025-04-25 revision c772d2691dabd15f2b6fcb8bddce64c4385b3b23)
vm_insnhelper.c (c772d2691dabd15f2b6fcb8bddce64c4385b3b23)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/struct.h"
30#include "variable.h"
31
32/* finish iseq array */
33#include "insns.inc"
34#include "insns_info.inc"
35
36extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
37extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
38extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
39extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
40 int argc, const VALUE *argv, int priv);
41
42static const struct rb_callcache vm_empty_cc;
43static const struct rb_callcache vm_empty_cc_for_super;
44
45/* control stack frame */
46
47static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
48
49static VALUE
50ruby_vm_special_exception_copy(VALUE exc)
51{
52 VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
53 rb_obj_copy_ivar(e, exc);
54 return e;
55}
56
57NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
58static void
59ec_stack_overflow(rb_execution_context_t *ec, int setup)
60{
61 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
62 ec->raised_flag = RAISED_STACKOVERFLOW;
63 if (setup) {
64 VALUE at = rb_ec_backtrace_object(ec);
65 mesg = ruby_vm_special_exception_copy(mesg);
66 rb_ivar_set(mesg, idBt, at);
67 rb_ivar_set(mesg, idBt_locations, at);
68 }
69 ec->errinfo = mesg;
70 EC_JUMP_TAG(ec, TAG_RAISE);
71}
72
73NORETURN(static void vm_stackoverflow(void));
74
75static void
76vm_stackoverflow(void)
77{
78 ec_stack_overflow(GET_EC(), TRUE);
79}
80
81NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
82/* critical level
83 * 0: VM stack overflow, or machine stack overflow is imminent
84 * 1: machine stack overflow but may be recoverable
85 * 2: fatal machine stack overflow
86 */
87void
88rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
89{
90 if (rb_during_gc()) {
91 rb_bug("system stack overflow during GC. Faulty native extension?");
92 }
93 if (crit > 1) {
94 ec->raised_flag = RAISED_STACKOVERFLOW;
95 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
96 EC_JUMP_TAG(ec, TAG_RAISE);
97 }
98 ec_stack_overflow(ec, crit == 0);
99}
100
101static inline void stack_check(rb_execution_context_t *ec);
102
103#if VM_CHECK_MODE > 0
104static int
105callable_class_p(VALUE klass)
106{
107#if VM_CHECK_MODE >= 2
108 if (!klass) return FALSE;
109 switch (RB_BUILTIN_TYPE(klass)) {
110 default:
111 break;
112 case T_ICLASS:
113 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
114 case T_MODULE:
115 return TRUE;
116 }
117 while (klass) {
118 if (klass == rb_cBasicObject) {
119 return TRUE;
120 }
121 klass = RCLASS_SUPER(klass);
122 }
123 return FALSE;
124#else
125 return klass != 0;
126#endif
127}
128
129static int
130callable_method_entry_p(const rb_callable_method_entry_t *cme)
131{
132 if (cme == NULL) {
133 return TRUE;
134 }
135 else {
136 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
137
138 if (callable_class_p(cme->defined_class)) {
139 return TRUE;
140 }
141 else {
142 return FALSE;
143 }
144 }
145}
146
147static void
148vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
149{
150 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
151 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
152
153 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
154 cref_or_me_type = imemo_type(cref_or_me);
155 }
156 if (type & VM_FRAME_FLAG_BMETHOD) {
157 req_me = TRUE;
158 }
159
160 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
161 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
162 }
163 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
164 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
165 }
166
167 if (req_me) {
168 if (cref_or_me_type != imemo_ment) {
169 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
170 }
171 }
172 else {
173 if (req_cref && cref_or_me_type != imemo_cref) {
174 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
175 }
176 else { /* cref or Qfalse */
177 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
178 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
179 /* ignore */
180 }
181 else {
182 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
183 }
184 }
185 }
186 }
187
188 if (cref_or_me_type == imemo_ment) {
189 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
190
191 if (!callable_method_entry_p(me)) {
192 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
193 }
194 }
195
196 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
197 VM_ASSERT(iseq == NULL ||
198 RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
199 RUBY_VM_NORMAL_ISEQ_P(iseq) //argument error
200 );
201 }
202 else {
203 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
204 }
205}
206
207static void
208vm_check_frame(VALUE type,
209 VALUE specval,
210 VALUE cref_or_me,
211 const rb_iseq_t *iseq)
212{
213 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
214 VM_ASSERT(FIXNUM_P(type));
215
216#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
217 case magic: \
218 vm_check_frame_detail(type, req_block, req_me, req_cref, \
219 specval, cref_or_me, is_cframe, iseq); \
220 break
221 switch (given_magic) {
222 /* BLK ME CREF CFRAME */
223 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
224 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
225 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
226 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
227 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
228 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
229 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
230 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
231 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
232 default:
233 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
234 }
235#undef CHECK
236}
237
238static VALUE vm_stack_canary; /* Initialized later */
239static bool vm_stack_canary_was_born = false;
240
241// Return the index of the instruction right before the given PC.
242// This is needed because insn_entry advances PC before the insn body.
243static unsigned int
244previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
245{
246 unsigned int pos = 0;
247 while (pos < ISEQ_BODY(iseq)->iseq_size) {
248 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
249 unsigned int next_pos = pos + insn_len(opcode);
250 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
251 return pos;
252 }
253 pos = next_pos;
254 }
255 rb_bug("failed to find the previous insn");
256}
257
258void
259rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
260{
261 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
262 const struct rb_iseq_struct *iseq;
263
264 if (! LIKELY(vm_stack_canary_was_born)) {
265 return; /* :FIXME: isn't it rather fatal to enter this branch? */
266 }
267 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
268 /* This is at the very beginning of a thread. cfp does not exist. */
269 return;
270 }
271 else if (! (iseq = GET_ISEQ())) {
272 return;
273 }
274 else if (LIKELY(sp[0] != vm_stack_canary)) {
275 return;
276 }
277 else {
278 /* we are going to call methods below; squash the canary to
279 * prevent infinite loop. */
280 sp[0] = Qundef;
281 }
282
283 const VALUE *orig = rb_iseq_original_iseq(iseq);
284 const VALUE iseqw = rb_iseqw_new(iseq);
285 const VALUE inspection = rb_inspect(iseqw);
286 const char *stri = rb_str_to_cstr(inspection);
287 const VALUE disasm = rb_iseq_disasm(iseq);
288 const char *strd = rb_str_to_cstr(disasm);
289 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
290 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
291 const char *name = insn_name(insn);
292
293 /* rb_bug() is not capable of outputting contents this large. It
294 is designed to run from a SIGSEGV handler, which tends to be
295 very restricted. */
296 ruby_debug_printf(
297 "We are killing the stack canary set by %s, "
298 "at %s@pc=%"PRIdPTR"\n"
299 "watch out the C stack trace.\n"
300 "%s",
301 name, stri, pos, strd);
302 rb_bug("see above.");
303}
304#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
305
306#else
307#define vm_check_canary(ec, sp)
308#define vm_check_frame(a, b, c, d)
309#endif /* VM_CHECK_MODE > 0 */
310
311#if USE_DEBUG_COUNTER
312static void
313vm_push_frame_debug_counter_inc(
314 const struct rb_execution_context_struct *ec,
315 const struct rb_control_frame_struct *reg_cfp,
316 VALUE type)
317{
318 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
319
320 RB_DEBUG_COUNTER_INC(frame_push);
321
322 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
323 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
324 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
325 if (prev) {
326 if (curr) {
327 RB_DEBUG_COUNTER_INC(frame_R2R);
328 }
329 else {
330 RB_DEBUG_COUNTER_INC(frame_R2C);
331 }
332 }
333 else {
334 if (curr) {
335 RB_DEBUG_COUNTER_INC(frame_C2R);
336 }
337 else {
338 RB_DEBUG_COUNTER_INC(frame_C2C);
339 }
340 }
341 }
342
343 switch (type & VM_FRAME_MAGIC_MASK) {
344 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
345 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
346 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
347 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
348 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
349 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
350 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
351 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
352 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
353 }
354
355 rb_bug("unreachable");
356}
357#else
358#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
359#endif
360
361// Return a poison value to be set above the stack top to verify leafness.
362VALUE
363rb_vm_stack_canary(void)
364{
365#if VM_CHECK_MODE > 0
366 return vm_stack_canary;
367#else
368 return 0;
369#endif
370}
371
372STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
373STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
374STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
375
376static void
377vm_push_frame(rb_execution_context_t *ec,
378 const rb_iseq_t *iseq,
379 VALUE type,
380 VALUE self,
381 VALUE specval,
382 VALUE cref_or_me,
383 const VALUE *pc,
384 VALUE *sp,
385 int local_size,
386 int stack_max)
387{
388 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
389
390 vm_check_frame(type, specval, cref_or_me, iseq);
391 VM_ASSERT(local_size >= 0);
392
393 /* check stack overflow */
394 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
395 vm_check_canary(ec, sp);
396
397 /* setup vm value stack */
398
399 /* initialize local variables */
400 for (int i=0; i < local_size; i++) {
401 *sp++ = Qnil;
402 }
403
404 /* setup ep with managing data */
405 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
406 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
407 *sp++ = type; /* ep[-0] / ENV_FLAGS */
408
409 /* setup new frame */
410 *cfp = (const struct rb_control_frame_struct) {
411 .pc = pc,
412 .sp = sp,
413 .iseq = iseq,
414 .self = self,
415 .ep = sp - 1,
416 .block_code = NULL,
417#if VM_DEBUG_BP_CHECK
418 .bp_check = sp,
419#endif
420 .jit_return = NULL
421 };
422
423 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
424 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
425 future/untested compilers/platforms. */
426
427 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
428 atomic_signal_fence(memory_order_seq_cst);
429 #endif
430
431 ec->cfp = cfp;
432
433 if (VMDEBUG == 2) {
434 SDR();
435 }
436 vm_push_frame_debug_counter_inc(ec, cfp, type);
437}
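/* For illustration: after vm_push_frame() with local_size == 2, the value
 * stack region owned by the new frame looks like this (the VM stack grows
 * toward higher addresses):
 *
 *   sp[-5]  local0 = Qnil
 *   sp[-4]  local1 = Qnil
 *   sp[-3]  cref_or_me    (ep[-2])
 *   sp[-2]  specval       (ep[-1])
 *   sp[-1]  type flags    (ep[0])  <- cfp->ep
 *   sp[ 0]  (next free slot)       <- cfp->sp
 */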
438
439void
440rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
441{
442 rb_control_frame_t *cfp = ec->cfp;
443
444 if (VMDEBUG == 2) SDR();
445
446 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
447}
448
449/* return TRUE if the frame is finished */
450static inline int
451vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
452{
453 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
454
455 if (VMDEBUG == 2) SDR();
456
457 RUBY_VM_CHECK_INTS(ec);
458 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
459
460 return flags & VM_FRAME_FLAG_FINISH;
461}
462
463void
464rb_vm_pop_frame(rb_execution_context_t *ec)
465{
466 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
467}
468
469// Pushes a pseudo frame whose file name is fname.
470VALUE
471rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
472{
473 rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
474 rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);
475
476 vm_push_frame(ec,
477 dmy_iseq, //const rb_iseq_t *iseq,
478 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
479 ec->cfp->self, // VALUE self,
480 VM_BLOCK_HANDLER_NONE, // VALUE specval,
481 Qfalse, // VALUE cref_or_me,
482 NULL, // const VALUE *pc,
483 ec->cfp->sp, // VALUE *sp,
484 0, // int local_size,
485 0); // int stack_max
486
487 return (VALUE)dmy_iseq;
488}
489
490/* method dispatch */
491static inline VALUE
492rb_arity_error_new(int argc, int min, int max)
493{
494 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
495 if (min == max) {
496 /* max is not needed */
497 }
498 else if (max == UNLIMITED_ARGUMENTS) {
499 rb_str_cat_cstr(err_mess, "+");
500 }
501 else {
502 rb_str_catf(err_mess, "..%d", max);
503 }
504 rb_str_cat_cstr(err_mess, ")");
505 return rb_exc_new3(rb_eArgError, err_mess);
506}
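/* For illustration, messages produced by the format above:
 *   rb_arity_error_new(1, 2, 2)  -> "wrong number of arguments (given 1, expected 2)"
 *   rb_arity_error_new(1, 2, UNLIMITED_ARGUMENTS)
 *                                -> "wrong number of arguments (given 1, expected 2+)"
 *   rb_arity_error_new(5, 2, 3)  -> "wrong number of arguments (given 5, expected 2..3)"
 */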
507
508void
509rb_error_arity(int argc, int min, int max)
510{
511 rb_exc_raise(rb_arity_error_new(argc, min, max));
512}
513
514/* lvar */
515
516NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
517
518static void
519vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
520{
521 /* remember env value forcibly */
522 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
523 VM_FORCE_WRITE(&ep[index], v);
524 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
525 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
526}
527
528// YJIT assumes this function never runs GC
529static inline void
530vm_env_write(const VALUE *ep, int index, VALUE v)
531{
532 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
533 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
534 VM_STACK_ENV_WRITE(ep, index, v);
535 }
536 else {
537 vm_env_write_slowpath(ep, index, v);
538 }
539}
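/* Note on the fast path above: VM_ENV_FLAG_WB_REQUIRED marks envs whose
 * writes currently need a GC write barrier. The slowpath registers the
 * whole env in the remembered set once and clears the flag, so subsequent
 * writes go through VM_STACK_ENV_WRITE barrier-free until the GC raises
 * the flag again. */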
540
541void
542rb_vm_env_write(const VALUE *ep, int index, VALUE v)
543{
544 vm_env_write(ep, index, v);
545}
546
547VALUE
548rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
549{
550 if (block_handler == VM_BLOCK_HANDLER_NONE) {
551 return Qnil;
552 }
553 else {
554 switch (vm_block_handler_type(block_handler)) {
555 case block_handler_type_iseq:
556 case block_handler_type_ifunc:
557 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
558 case block_handler_type_symbol:
559 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
560 case block_handler_type_proc:
561 return VM_BH_TO_PROC(block_handler);
562 default:
563 VM_UNREACHABLE(rb_vm_bh_to_procval);
564 }
565 }
566}
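/* Summary of the conversion above:
 *   VM_BLOCK_HANDLER_NONE       -> Qnil (no block given)
 *   iseq/ifunc block handler    -> fresh Proc wrapping the captured block
 *   Symbol block handler        -> Symbol#to_proc result
 *   Proc block handler          -> returned as-is
 */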
567
568/* svar */
569
570#if VM_CHECK_MODE > 0
571static int
572vm_svar_valid_p(VALUE svar)
573{
574 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
575 switch (imemo_type(svar)) {
576 case imemo_svar:
577 case imemo_cref:
578 case imemo_ment:
579 return TRUE;
580 default:
581 break;
582 }
583 }
584 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
585 return FALSE;
586}
587#endif
588
589static inline struct vm_svar *
590lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
591{
592 VALUE svar;
593
594 if (lep && (ec == NULL || ec->root_lep != lep)) {
595 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
596 }
597 else {
598 svar = ec->root_svar;
599 }
600
601 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
602
603 return (struct vm_svar *)svar;
604}
605
606static inline void
607lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
608{
609 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
610
611 if (lep && (ec == NULL || ec->root_lep != lep)) {
612 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
613 }
614 else {
615 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
616 }
617}
618
619static VALUE
620lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
621{
622 const struct vm_svar *svar = lep_svar(ec, lep);
623
624 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
625
626 switch (key) {
627 case VM_SVAR_LASTLINE:
628 return svar->lastline;
629 case VM_SVAR_BACKREF:
630 return svar->backref;
631 default: {
632 const VALUE ary = svar->others;
633
634 if (NIL_P(ary)) {
635 return Qnil;
636 }
637 else {
638 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
639 }
640 }
641 }
642}
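/* The svar slots read above back the per-frame special variables:
 * VM_SVAR_LASTLINE is $_, VM_SVAR_BACKREF is $~, and keys from
 * VM_SVAR_EXTRA_START on index the `others` array (used, for example,
 * for flip-flop state). */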
643
644static struct vm_svar *
645svar_new(VALUE obj)
646{
647 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
648 *((VALUE *)&svar->lastline) = Qnil;
649 *((VALUE *)&svar->backref) = Qnil;
650 *((VALUE *)&svar->others) = Qnil;
651
652 return svar;
653}
654
655static void
656lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
657{
658 struct vm_svar *svar = lep_svar(ec, lep);
659
660 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
661 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
662 }
663
664 switch (key) {
665 case VM_SVAR_LASTLINE:
666 RB_OBJ_WRITE(svar, &svar->lastline, val);
667 return;
668 case VM_SVAR_BACKREF:
669 RB_OBJ_WRITE(svar, &svar->backref, val);
670 return;
671 default: {
672 VALUE ary = svar->others;
673
674 if (NIL_P(ary)) {
675 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
676 }
677 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
678 }
679 }
680}
681
682static inline VALUE
683vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
684{
685 VALUE val;
686
687 if (type == 0) {
688 val = lep_svar_get(ec, lep, key);
689 }
690 else {
691 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
692
693 if (type & 0x01) {
694 switch (type >> 1) {
695 case '&':
696 val = rb_reg_last_match(backref);
697 break;
698 case '`':
699 val = rb_reg_match_pre(backref);
700 break;
701 case '\'':
702 val = rb_reg_match_post(backref);
703 break;
704 case '+':
705 val = rb_reg_match_last(backref);
706 break;
707 default:
708 rb_bug("unexpected back-ref");
709 }
710 }
711 else {
712 val = rb_reg_nth_match((int)(type >> 1), backref);
713 }
714 }
715 return val;
716}
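/* Sketch of the `type` encoding consumed above:
 *   type == 0   ... `key` names an svar slot ($_, $~, ...)
 *   type & 0x01 ... (type >> 1) is a character: '&', '`', '\'' or '+'
 *   otherwise   ... (type >> 1) is n of the numbered backref $n
 * e.g. $& arrives as (('&' << 1) | 1), and $2 as (2 << 1). */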
717
718static inline VALUE
719vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
720{
721 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
722 int nth = 0;
723
724 if (type & 0x01) {
725 switch (type >> 1) {
726 case '&':
727 case '`':
728 case '\'':
729 break;
730 case '+':
731 return rb_reg_last_defined(backref);
732 default:
733 rb_bug("unexpected back-ref");
734 }
735 }
736 else {
737 nth = (int)(type >> 1);
738 }
739 return rb_reg_nth_defined(nth, backref);
740}
741
742PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
743static rb_callable_method_entry_t *
744check_method_entry(VALUE obj, int can_be_svar)
745{
746 if (obj == Qfalse) return NULL;
747
748#if VM_CHECK_MODE > 0
749 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
750#endif
751
752 switch (imemo_type(obj)) {
753 case imemo_ment:
754 return (rb_callable_method_entry_t *)obj;
755 case imemo_cref:
756 return NULL;
757 case imemo_svar:
758 if (can_be_svar) {
759 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
760 }
761 default:
762#if VM_CHECK_MODE > 0
763 rb_bug("check_method_entry: svar should not be there:");
764#endif
765 return NULL;
766 }
767}
768
769const rb_callable_method_entry_t *
770rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
771{
772 const VALUE *ep = cfp->ep;
773 rb_callable_method_entry_t *me;
774
775 while (!VM_ENV_LOCAL_P(ep)) {
776 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
777 ep = VM_ENV_PREV_EP(ep);
778 }
779
780 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
781}
782
783static const rb_iseq_t *
784method_entry_iseqptr(const rb_callable_method_entry_t *me)
785{
786 switch (me->def->type) {
787 case VM_METHOD_TYPE_ISEQ:
788 return me->def->body.iseq.iseqptr;
789 default:
790 return NULL;
791 }
792}
793
794static rb_cref_t *
795method_entry_cref(const rb_callable_method_entry_t *me)
796{
797 switch (me->def->type) {
798 case VM_METHOD_TYPE_ISEQ:
799 return me->def->body.iseq.cref;
800 default:
801 return NULL;
802 }
803}
804
805#if VM_CHECK_MODE == 0
806PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
807#endif
808static rb_cref_t *
809check_cref(VALUE obj, int can_be_svar)
810{
811 if (obj == Qfalse) return NULL;
812
813#if VM_CHECK_MODE > 0
814 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
815#endif
816
817 switch (imemo_type(obj)) {
818 case imemo_ment:
819 return method_entry_cref((rb_callable_method_entry_t *)obj);
820 case imemo_cref:
821 return (rb_cref_t *)obj;
822 case imemo_svar:
823 if (can_be_svar) {
824 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
825 }
826 default:
827#if VM_CHECK_MODE > 0
828 rb_bug("check_method_entry: svar should not be there:");
829#endif
830 return NULL;
831 }
832}
833
834static inline rb_cref_t *
835vm_env_cref(const VALUE *ep)
836{
837 rb_cref_t *cref;
838
839 while (!VM_ENV_LOCAL_P(ep)) {
840 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
841 ep = VM_ENV_PREV_EP(ep);
842 }
843
844 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
845}
846
847static int
848is_cref(const VALUE v, int can_be_svar)
849{
850 if (RB_TYPE_P(v, T_IMEMO)) {
851 switch (imemo_type(v)) {
852 case imemo_cref:
853 return TRUE;
854 case imemo_svar:
855 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
856 default:
857 break;
858 }
859 }
860 return FALSE;
861}
862
863static int
864vm_env_cref_by_cref(const VALUE *ep)
865{
866 while (!VM_ENV_LOCAL_P(ep)) {
867 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
868 ep = VM_ENV_PREV_EP(ep);
869 }
870 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
871}
872
873static rb_cref_t *
874cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
875{
876 const VALUE v = *vptr;
877 rb_cref_t *cref, *new_cref;
878
879 if (RB_TYPE_P(v, T_IMEMO)) {
880 switch (imemo_type(v)) {
881 case imemo_cref:
882 cref = (rb_cref_t *)v;
883 new_cref = vm_cref_dup(cref);
884 if (parent) {
885 RB_OBJ_WRITE(parent, vptr, new_cref);
886 }
887 else {
888 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
889 }
890 return (rb_cref_t *)new_cref;
891 case imemo_svar:
892 if (can_be_svar) {
893 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
894 }
895 /* fall through */
896 case imemo_ment:
897 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
898 default:
899 break;
900 }
901 }
902 return NULL;
903}
904
905static rb_cref_t *
906vm_cref_replace_with_duplicated_cref(const VALUE *ep)
907{
908 if (vm_env_cref_by_cref(ep)) {
909 rb_cref_t *cref;
910 VALUE envval;
911
912 while (!VM_ENV_LOCAL_P(ep)) {
913 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
914 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
915 return cref;
916 }
917 ep = VM_ENV_PREV_EP(ep);
918 }
919 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
920 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
921 }
922 else {
923 rb_bug("vm_cref_dup: unreachable");
924 }
925}
926
927static rb_cref_t *
928vm_get_cref(const VALUE *ep)
929{
930 rb_cref_t *cref = vm_env_cref(ep);
931
932 if (cref != NULL) {
933 return cref;
934 }
935 else {
936 rb_bug("vm_get_cref: unreachable");
937 }
938}
939
940rb_cref_t *
941rb_vm_get_cref(const VALUE *ep)
942{
943 return vm_get_cref(ep);
944}
945
946static rb_cref_t *
947vm_ec_cref(const rb_execution_context_t *ec)
948{
949 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
950
951 if (cfp == NULL) {
952 return NULL;
953 }
954 return vm_get_cref(cfp->ep);
955}
956
957static const rb_cref_t *
958vm_get_const_key_cref(const VALUE *ep)
959{
960 const rb_cref_t *cref = vm_get_cref(ep);
961 const rb_cref_t *key_cref = cref;
962
963 while (cref) {
964 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
965 RCLASS_EXT(CREF_CLASS(cref))->cloned) {
966 return key_cref;
967 }
968 cref = CREF_NEXT(cref);
969 }
970
971 /* does not include singleton class */
972 return NULL;
973}
974
975void
976rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
977{
978 rb_cref_t *new_cref;
979
980 while (cref) {
981 if (CREF_CLASS(cref) == old_klass) {
982 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
983 *new_cref_ptr = new_cref;
984 return;
985 }
986 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
987 cref = CREF_NEXT(cref);
988 *new_cref_ptr = new_cref;
989 new_cref_ptr = &new_cref->next;
990 }
991 *new_cref_ptr = NULL;
992}
993
994static rb_cref_t *
995vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
996{
997 rb_cref_t *prev_cref = NULL;
998
999 if (ep) {
1000 prev_cref = vm_env_cref(ep);
1001 }
1002 else {
1003 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1004
1005 if (cfp) {
1006 prev_cref = vm_env_cref(cfp->ep);
1007 }
1008 }
1009
1010 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1011}
1012
1013static inline VALUE
1014vm_get_cbase(const VALUE *ep)
1015{
1016 const rb_cref_t *cref = vm_get_cref(ep);
1017
1018 return CREF_CLASS_FOR_DEFINITION(cref);
1019}
1020
1021static inline VALUE
1022vm_get_const_base(const VALUE *ep)
1023{
1024 const rb_cref_t *cref = vm_get_cref(ep);
1025
1026 while (cref) {
1027 if (!CREF_PUSHED_BY_EVAL(cref)) {
1028 return CREF_CLASS_FOR_DEFINITION(cref);
1029 }
1030 cref = CREF_NEXT(cref);
1031 }
1032
1033 return Qundef;
1034}
1035
1036static inline void
1037vm_check_if_namespace(VALUE klass)
1038{
1039 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1040 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1041 }
1042}
1043
1044static inline void
1045vm_ensure_not_refinement_module(VALUE self)
1046{
1047 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1048 rb_warn("not defined at the refinement, but at the outer class/module");
1049 }
1050}
1051
1052static inline VALUE
1053vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1054{
1055 return klass;
1056}
1057
1058static inline VALUE
1059vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1060{
1061 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1062 VALUE val;
1063
1064 if (NIL_P(orig_klass) && allow_nil) {
1065 /* in current lexical scope */
1066 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1067 const rb_cref_t *cref;
1068 VALUE klass = Qnil;
1069
1070 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1071 root_cref = CREF_NEXT(root_cref);
1072 }
1073 cref = root_cref;
1074 while (cref && CREF_NEXT(cref)) {
1075 if (CREF_PUSHED_BY_EVAL(cref)) {
1076 klass = Qnil;
1077 }
1078 else {
1079 klass = CREF_CLASS(cref);
1080 }
1081 cref = CREF_NEXT(cref);
1082
1083 if (!NIL_P(klass)) {
1084 VALUE av, am = 0;
1085 rb_const_entry_t *ce;
1086 search_continue:
1087 if ((ce = rb_const_lookup(klass, id))) {
1088 rb_const_warn_if_deprecated(ce, klass, id);
1089 val = ce->value;
1090 if (UNDEF_P(val)) {
1091 if (am == klass) break;
1092 am = klass;
1093 if (is_defined) return 1;
1094 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1095 rb_autoload_load(klass, id);
1096 goto search_continue;
1097 }
1098 else {
1099 if (is_defined) {
1100 return 1;
1101 }
1102 else {
1103 if (UNLIKELY(!rb_ractor_main_p())) {
1104 if (!rb_ractor_shareable_p(val)) {
1105 rb_raise(rb_eRactorIsolationError,
1106 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1107 }
1108 }
1109 return val;
1110 }
1111 }
1112 }
1113 }
1114 }
1115
1116 /* search self */
1117 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1118 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1119 }
1120 else {
1121 klass = CLASS_OF(ec->cfp->self);
1122 }
1123
1124 if (is_defined) {
1125 return rb_const_defined(klass, id);
1126 }
1127 else {
1128 return rb_const_get(klass, id);
1129 }
1130 }
1131 else {
1132 vm_check_if_namespace(orig_klass);
1133 if (is_defined) {
1134 return rb_public_const_defined_from(orig_klass, id);
1135 }
1136 else {
1137 return rb_public_const_get_from(orig_klass, id);
1138 }
1139 }
1140}
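/* In short, the lookup above is two-phase: first walk the lexical cref
 * chain (skipping scopes pushed by eval), consulting each class's own
 * constant table and honoring autoload; if nothing is found, fall back
 * to an ancestry search from the innermost non-eval cref's class (or
 * from CLASS_OF(self) at toplevel) via rb_const_get()/rb_const_defined(). */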
1141
1142VALUE
1143rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1144{
1145 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1146}
1147
1148static inline VALUE
1149vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1150{
1151 VALUE val = Qnil;
1152 int idx = 0;
1153 int allow_nil = TRUE;
1154 if (segments[0] == idNULL) {
1155 val = rb_cObject;
1156 idx++;
1157 allow_nil = FALSE;
1158 }
1159 while (segments[idx]) {
1160 ID id = segments[idx++];
1161 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1162 allow_nil = FALSE;
1163 }
1164 return val;
1165}
1166
1167
1168static inline VALUE
1169vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1170{
1171 VALUE klass;
1172
1173 if (!cref) {
1174 rb_bug("vm_get_cvar_base: no cref");
1175 }
1176
1177 while (CREF_NEXT(cref) &&
1178 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1179 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1180 cref = CREF_NEXT(cref);
1181 }
1182 if (top_level_raise && !CREF_NEXT(cref)) {
1183 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1184 }
1185
1186 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1187
1188 if (NIL_P(klass)) {
1189 rb_raise(rb_eTypeError, "no class variables available");
1190 }
1191 return klass;
1192}
1193
1194ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1195static inline void
1196fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1197{
1198 if (is_attr) {
1199 vm_cc_attr_index_set(cc, index, shape_id);
1200 }
1201 else {
1202 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1203 }
1204}
1205
1206#define ractor_incidental_shareable_p(cond, val) \
1207 (!(cond) || rb_ractor_shareable_p(val))
1208#define ractor_object_incidental_shareable_p(obj, val) \
1209 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1210
1211#define ATTR_INDEX_NOT_SET (attr_index_t)-1
1212
1213ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1214static inline VALUE
1215vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1216{
1217#if OPT_IC_FOR_IVAR
1218 VALUE val = Qundef;
1219 shape_id_t shape_id;
1220 VALUE * ivar_list;
1221
1222 if (SPECIAL_CONST_P(obj)) {
1223 return default_value;
1224 }
1225
1226#if SHAPE_IN_BASIC_FLAGS
1227 shape_id = RBASIC_SHAPE_ID(obj);
1228#endif
1229
1230 switch (BUILTIN_TYPE(obj)) {
1231 case T_OBJECT:
1232 ivar_list = ROBJECT_IVPTR(obj);
1233 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1234
1235#if !SHAPE_IN_BASIC_FLAGS
1236 shape_id = ROBJECT_SHAPE_ID(obj);
1237#endif
1238 break;
1239 case T_CLASS:
1240 case T_MODULE:
1241 {
1242 if (UNLIKELY(!rb_ractor_main_p())) {
1243 // For two reasons we can only use the fast path on the main
1244 // ractor.
1245 // First, only the main ractor is allowed to set ivars on classes
1246 // and modules. So we can skip locking.
1247 // Second, other ractors need to check the shareability of the
1248 // values returned from the class ivars.
1249
1250 if (default_value == Qundef) { // defined?
1251 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1252 }
1253 else {
1254 goto general_path;
1255 }
1256 }
1257
1258 ivar_list = RCLASS_IVPTR(obj);
1259
1260#if !SHAPE_IN_BASIC_FLAGS
1261 shape_id = RCLASS_SHAPE_ID(obj);
1262#endif
1263
1264 break;
1265 }
1266 default:
1267 if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1268 struct gen_ivtbl *ivtbl;
1269 rb_gen_ivtbl_get(obj, id, &ivtbl);
1270#if !SHAPE_IN_BASIC_FLAGS
1271 shape_id = ivtbl->shape_id;
1272#endif
1273 ivar_list = ivtbl->as.shape.ivptr;
1274 }
1275 else {
1276 return default_value;
1277 }
1278 }
1279
1280 shape_id_t cached_id;
1281 attr_index_t index;
1282
1283 if (is_attr) {
1284 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1285 }
1286 else {
1287 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1288 }
1289
1290 if (LIKELY(cached_id == shape_id)) {
1291 RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1292
1293 if (index == ATTR_INDEX_NOT_SET) {
1294 return default_value;
1295 }
1296
1297 val = ivar_list[index];
1298#if USE_DEBUG_COUNTER
1299 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1300
1301 if (RB_TYPE_P(obj, T_OBJECT)) {
1302 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1303 }
1304#endif
1305 RUBY_ASSERT(!UNDEF_P(val));
1306 }
1307 else { // cache miss case
1308#if USE_DEBUG_COUNTER
1309 if (is_attr) {
1310 if (cached_id != INVALID_SHAPE_ID) {
1311 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1312 }
1313 else {
1314 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1315 }
1316 }
1317 else {
1318 if (cached_id != INVALID_SHAPE_ID) {
1319 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1320 }
1321 else {
1322 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1323 }
1324 }
1325 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1326
1327 if (RB_TYPE_P(obj, T_OBJECT)) {
1328 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1329 }
1330#endif
1331
1332 if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
1333 st_table *table = NULL;
1334 switch (BUILTIN_TYPE(obj)) {
1335 case T_CLASS:
1336 case T_MODULE:
1337 table = (st_table *)RCLASS_IVPTR(obj);
1338 break;
1339
1340 case T_OBJECT:
1341 table = ROBJECT_IV_HASH(obj);
1342 break;
1343
1344 default: {
1345 struct gen_ivtbl *ivtbl;
1346 if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
1347 table = ivtbl->as.complex.table;
1348 }
1349 break;
1350 }
1351 }
1352
1353 if (!table || !st_lookup(table, id, &val)) {
1354 val = default_value;
1355 }
1356 }
1357 else {
1358 shape_id_t previous_cached_id = cached_id;
1359 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1360 // Refill the inline cache with the index found via
1361 // the cached-shape hint.
1362 if (cached_id != previous_cached_id) {
1363 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1364 }
1365
1366 if (index == ATTR_INDEX_NOT_SET) {
1367 val = default_value;
1368 }
1369 else {
1370 // We fetched the ivar list above
1371 val = ivar_list[index];
1372 RUBY_ASSERT(!UNDEF_P(val));
1373 }
1374 }
1375 else {
1376 if (is_attr) {
1377 vm_cc_attr_index_initialize(cc, shape_id);
1378 }
1379 else {
1380 vm_ic_attr_index_initialize(ic, shape_id);
1381 }
1382
1383 val = default_value;
1384 }
1385 }
1386
1387 }
1388
1389 if (!UNDEF_P(default_value)) {
1390 RUBY_ASSERT(!UNDEF_P(val));
1391 }
1392
1393 return val;
1394
1395general_path:
1396#endif /* OPT_IC_FOR_IVAR */
1397 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1398
1399 if (is_attr) {
1400 return rb_attr_get(obj, id);
1401 }
1402 else {
1403 return rb_ivar_get(obj, id);
1404 }
1405}
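/* Inline-cache protocol used above, in brief: the IC/CC caches a
 * (shape_id, attr_index) pair. A read first compares the receiver's
 * current shape_id with the cached one; on a hit the ivar is a direct
 * ivar_list[index] load, and on a miss the index is recomputed from the
 * shape tree and the cache refilled for the next call. */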
1406
1407static void
1408populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1409{
1410 RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1411
1412 // Cache population code
1413 if (is_attr) {
1414 vm_cc_attr_index_set(cc, index, next_shape_id);
1415 }
1416 else {
1417 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1418 }
1419}
1420
1421ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1422NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1423NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1424
1425static VALUE
1426vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1427{
1428#if OPT_IC_FOR_IVAR
1429 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1430
1431 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1432 rb_check_frozen(obj);
1433
1434 attr_index_t index = rb_obj_ivar_set(obj, id, val);
1435
1436 shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
1437
1438 if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
1439 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1440 }
1441
1442 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1443 return val;
1444 }
1445#endif
1446 return rb_ivar_set(obj, id, val);
1447}
1448
1449static VALUE
1450vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1451{
1452 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1453}
1454
1455static VALUE
1456vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1457{
1458 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1459}
1460
1461NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1462static VALUE
1463vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1464{
1465#if SHAPE_IN_BASIC_FLAGS
1466 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1467#else
1468 shape_id_t shape_id = rb_generic_shape_id(obj);
1469#endif
1470
1471 struct gen_ivtbl *ivtbl = 0;
1472
1473 // Cache hit case
1474 if (shape_id == dest_shape_id) {
1475 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1476 }
1477 else if (dest_shape_id != INVALID_SHAPE_ID) {
1478 rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1479 rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1480
1481 if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1482 RUBY_ASSERT(index < dest_shape->capacity);
1483 }
1484 else {
1485 return Qundef;
1486 }
1487 }
1488 else {
1489 return Qundef;
1490 }
1491
1492 rb_gen_ivtbl_get(obj, 0, &ivtbl);
1493
1494 if (shape_id != dest_shape_id) {
1495#if SHAPE_IN_BASIC_FLAGS
1496 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1497#else
1498 ivtbl->shape_id = dest_shape_id;
1499#endif
1500 }
1501
1502 RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);
1503
1504 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1505
1506 return val;
1507}
1508
1509static inline VALUE
1510vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1511{
1512#if OPT_IC_FOR_IVAR
1513 switch (BUILTIN_TYPE(obj)) {
1514 case T_OBJECT:
1515 {
1516 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1517
1518 shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
1519 RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1520
1521 if (LIKELY(shape_id == dest_shape_id)) {
1522 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1523 VM_ASSERT(!rb_ractor_shareable_p(obj));
1524 }
1525 else if (dest_shape_id != INVALID_SHAPE_ID) {
1526 rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1527 rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1528 shape_id_t source_shape_id = dest_shape->parent_id;
1529
1530 if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1531 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1532
1533 ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
1534
1535 RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
1536 RUBY_ASSERT(index < dest_shape->capacity);
1537 }
1538 else {
1539 break;
1540 }
1541 }
1542 else {
1543 break;
1544 }
1545
1546 VALUE *ptr = ROBJECT_IVPTR(obj);
1547
1548 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
1549 RB_OBJ_WRITE(obj, &ptr[index], val);
1550
1551 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1552 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1553 return val;
1554 }
1555 break;
1556 case T_CLASS:
1557 case T_MODULE:
1558 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1559 default:
1560 break;
1561 }
1562
1563 return Qundef;
1564#endif /* OPT_IC_FOR_IVAR */
1565}
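/* The miss-with-valid-dest case above is the shape-transition fast path:
 * when the receiver's current shape is the parent of the cached
 * dest_shape_id, with a matching edge name (id) and equal capacity, the
 * write may store the value and move the object to dest_shape_id without
 * consulting the shape tree. */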
1566
1567static VALUE
1568update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1569{
1570 VALUE defined_class = 0;
1571 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1572
1573 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1574 defined_class = RBASIC(defined_class)->klass;
1575 }
1576
1577 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1578 if (!rb_cvc_tbl) {
1579 rb_bug("the cvc table should be set");
1580 }
1581
1582 VALUE ent_data;
1583 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1584 rb_bug("should have cvar cache entry");
1585 }
1586
1587 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1588
1589 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1590 ent->cref = cref;
1591 ic->entry = ent;
1592
1593 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1594 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1595 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1596 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1597
1598 return cvar_value;
1599}
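/* The entry cached here is validated on later reads and writes (see
 * vm_getclassvariable/vm_setclassvariable below) by comparing the global
 * cvar state and the cref, and by requiring the main ractor; bumping
 * GET_GLOBAL_CVAR_STATE() invalidates every cached entry at once. */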
1600
1601static inline VALUE
1602vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1603{
1604 const rb_cref_t *cref;
1605 cref = vm_get_cref(GET_EP());
1606
1607 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1608 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1609
1610 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1611 RUBY_ASSERT(!UNDEF_P(v));
1612
1613 return v;
1614 }
1615
1616 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1617
1618 return update_classvariable_cache(iseq, klass, id, cref, ic);
1619}
1620
1621VALUE
1622rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1623{
1624 return vm_getclassvariable(iseq, cfp, id, ic);
1625}
1626
1627static inline void
1628vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1629{
1630 const rb_cref_t *cref;
1631 cref = vm_get_cref(GET_EP());
1632
1633 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1634 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1635
1636 rb_class_ivar_set(ic->entry->class_value, id, val);
1637 return;
1638 }
1639
1640 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1641
1642 rb_cvar_set(klass, id, val);
1643
1644 update_classvariable_cache(iseq, klass, id, cref, ic);
1645}
1646
1647void
1648rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1649{
1650 vm_setclassvariable(iseq, cfp, id, val, ic);
1651}
1652
1653static inline VALUE
1654vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1655{
1656 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1657}
1658
1659static inline void
1660vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1661{
1662 if (RB_SPECIAL_CONST_P(obj)) {
1663 rb_error_frozen_object(obj);
1664 return;
1665 }
1666
1667 shape_id_t dest_shape_id;
1668 attr_index_t index;
1669 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1670
1671 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1672 switch (BUILTIN_TYPE(obj)) {
1673 case T_OBJECT:
1674 case T_CLASS:
1675 case T_MODULE:
1676 break;
1677 default:
1678 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1679 return;
1680 }
1681 }
1682 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1683 }
1684}
1685
1686void
1687rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1688{
1689 vm_setinstancevariable(iseq, obj, id, val, ic);
1690}
1691
1692static VALUE
1693vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1694{
1695 /* continue throw */
1696
1697 if (FIXNUM_P(err)) {
1698 ec->tag->state = RUBY_TAG_FATAL;
1699 }
1700 else if (SYMBOL_P(err)) {
1701 ec->tag->state = TAG_THROW;
1702 }
1703 else if (THROW_DATA_P(err)) {
1704 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1705 }
1706 else {
1707 ec->tag->state = TAG_RAISE;
1708 }
1709 return err;
1710}
1711
1712static VALUE
1713vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1714 const int flag, const VALUE throwobj)
1715{
1716 const rb_control_frame_t *escape_cfp = NULL;
1717 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1718
1719 if (flag != 0) {
1720 /* do nothing */
1721 }
1722 else if (state == TAG_BREAK) {
1723 int is_orphan = 1;
1724 const VALUE *ep = GET_EP();
1725 const rb_iseq_t *base_iseq = GET_ISEQ();
1726 escape_cfp = reg_cfp;
1727
1728 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1729 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1730 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1731 ep = escape_cfp->ep;
1732 base_iseq = escape_cfp->iseq;
1733 }
1734 else {
1735 ep = VM_ENV_PREV_EP(ep);
1736 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1737 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1738 VM_ASSERT(escape_cfp->iseq == base_iseq);
1739 }
1740 }
1741
1742 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1743 /* lambda{... break ...} */
1744 is_orphan = 0;
1745 state = TAG_RETURN;
1746 }
1747 else {
1748 ep = VM_ENV_PREV_EP(ep);
1749
1750 while (escape_cfp < eocfp) {
1751 if (escape_cfp->ep == ep) {
1752 const rb_iseq_t *const iseq = escape_cfp->iseq;
1753 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1754 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1755 unsigned int i;
1756
1757 if (!ct) break;
1758 for (i=0; i < ct->size; i++) {
1759 const struct iseq_catch_table_entry *const entry =
1760 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1761
1762 if (entry->type == CATCH_TYPE_BREAK &&
1763 entry->iseq == base_iseq &&
1764 entry->start < epc && entry->end >= epc) {
1765 if (entry->cont == epc) { /* found! */
1766 is_orphan = 0;
1767 }
1768 break;
1769 }
1770 }
1771 break;
1772 }
1773
1774 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1775 }
1776 }
1777
1778 if (is_orphan) {
1779 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1780 }
1781 }
1782 else if (state == TAG_RETRY) {
1783 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1784
1785 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1786 }
1787 else if (state == TAG_RETURN) {
1788 const VALUE *current_ep = GET_EP();
1789 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1790 int in_class_frame = 0;
1791 int toplevel = 1;
1792 escape_cfp = reg_cfp;
1793
1794 // find target_lep, target_ep
1795 while (!VM_ENV_LOCAL_P(ep)) {
1796 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1797 target_ep = ep;
1798 }
1799 ep = VM_ENV_PREV_EP(ep);
1800 }
1801 target_lep = ep;
1802
1803 while (escape_cfp < eocfp) {
1804 const VALUE *lep = VM_CF_LEP(escape_cfp);
1805
1806 if (!target_lep) {
1807 target_lep = lep;
1808 }
1809
1810 if (lep == target_lep &&
1811 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1812 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1813 in_class_frame = 1;
1814 target_lep = 0;
1815 }
1816
1817 if (lep == target_lep) {
1818 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1819 toplevel = 0;
1820 if (in_class_frame) {
1821 /* lambda {class A; ... return ...; end} */
1822 goto valid_return;
1823 }
1824 else {
1825 const VALUE *tep = current_ep;
1826
1827 while (target_lep != tep) {
1828 if (escape_cfp->ep == tep) {
1829 /* in lambda */
1830 if (tep == target_ep) {
1831 goto valid_return;
1832 }
1833 else {
1834 goto unexpected_return;
1835 }
1836 }
1837 tep = VM_ENV_PREV_EP(tep);
1838 }
1839 }
1840 }
1841 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1842 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1843 case ISEQ_TYPE_TOP:
1844 case ISEQ_TYPE_MAIN:
1845 if (toplevel) {
1846 if (in_class_frame) goto unexpected_return;
1847 if (target_ep == NULL) {
1848 goto valid_return;
1849 }
1850 else {
1851 goto unexpected_return;
1852 }
1853 }
1854 break;
1855 case ISEQ_TYPE_EVAL: {
1856 const rb_iseq_t *is = escape_cfp->iseq;
1857 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1858 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1859 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1860 t = ISEQ_BODY(is)->type;
1861 }
1862 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1863 break;
1864 }
1865 case ISEQ_TYPE_CLASS:
1866 toplevel = 0;
1867 break;
1868 default:
1869 break;
1870 }
1871 }
1872 }
1873
1874 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1875 if (target_ep == NULL) {
1876 goto valid_return;
1877 }
1878 else {
1879 goto unexpected_return;
1880 }
1881 }
1882
1883 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1884 }
1885 unexpected_return:;
1886 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1887
1888 valid_return:;
1889 /* do nothing */
1890 }
1891 else {
1892 rb_bug("isns(throw): unsupported throw type");
1893 }
1894
1895 ec->tag->state = state;
1896 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1897}
1898
1899static VALUE
1900vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1901 rb_num_t throw_state, VALUE throwobj)
1902{
1903 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1904 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1905
1906 if (state != 0) {
1907 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1908 }
1909 else {
1910 return vm_throw_continue(ec, throwobj);
1911 }
1912}
1913
1914VALUE
1915rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1916{
1917 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1918}
1919
1920static inline void
1921vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1922{
1923 int is_splat = flag & 0x01;
1924 const VALUE *ptr;
1925 rb_num_t len;
1926 const VALUE obj = ary;
1927
1928 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1929 ary = obj;
1930 ptr = &ary;
1931 len = 1;
1932 }
1933 else {
1934 ptr = RARRAY_CONST_PTR(ary);
1935 len = (rb_num_t)RARRAY_LEN(ary);
1936 }
1937
1938 if (num + is_splat == 0) {
1939 /* no space left on stack */
1940 }
1941 else if (flag & 0x02) {
1942 /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1943 rb_num_t i = 0, j;
1944
1945 if (len < num) {
1946 for (i = 0; i < num - len; i++) {
1947 *cfp->sp++ = Qnil;
1948 }
1949 }
1950
1951 for (j = 0; i < num; i++, j++) {
1952 VALUE v = ptr[len - j - 1];
1953 *cfp->sp++ = v;
1954 }
1955
1956 if (is_splat) {
1957 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1958 }
1959 }
1960 else {
1961 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1962 if (is_splat) {
1963 if (num > len) {
1964 *cfp->sp++ = rb_ary_new();
1965 }
1966 else {
1967 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1968 }
1969 }
1970
1971 if (num > len) {
1972 rb_num_t i = 0;
1973 for (; i < num - len; i++) {
1974 *cfp->sp++ = Qnil;
1975 }
1976
1977 for (rb_num_t j = 0; i < num; i++, j++) {
1978 *cfp->sp++ = ptr[len - j - 1];
1979 }
1980 }
1981 else {
1982 for (rb_num_t j = 0; j < num; j++) {
1983 *cfp->sp++ = ptr[num - j - 1];
1984 }
1985 }
1986 }
1987
1988 RB_GC_GUARD(ary);
1989}
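/* Worked example for the "normal" branch above: `a, b, *c = [1, 2, 3, 4]`
 * expands with num == 2 and the splat flag set, pushing [3, 4] (the rest
 * array), then 2, then 1, so the topmost value is ary[0] and plain pops
 * assign a = 1, b = 2, c = [3, 4]. */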
1990
1991static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1992
1993static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1994
1995static struct rb_class_cc_entries *
1996vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1997{
1998 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1999#if VM_CHECK_MODE > 0
2000 ccs->debug_sig = ~(VALUE)ccs;
2001#endif
2002 ccs->capa = 0;
2003 ccs->len = 0;
2004 ccs->cme = cme;
2005 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2006 ccs->entries = NULL;
2007
2008 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2009 RB_OBJ_WRITTEN(klass, Qundef, cme);
2010 return ccs;
2011}
2012
2013static void
2014vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2015{
2016 if (! vm_cc_markable(cc)) {
2017 return;
2018 }
2019
2020 if (UNLIKELY(ccs->len == ccs->capa)) {
2021 if (ccs->capa == 0) {
2022 ccs->capa = 1;
2023 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2024 }
2025 else {
2026 ccs->capa *= 2;
2027 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2028 }
2029 }
2030 VM_ASSERT(ccs->len < ccs->capa);
2031
2032 const int pos = ccs->len++;
2033 ccs->entries[pos].argc = vm_ci_argc(ci);
2034 ccs->entries[pos].flag = vm_ci_flag(ci);
2035 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2036
2037 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2038 // for tuning
2039 // vm_mtbl_dump(klass, 0);
2040 }
2041}
2042
2043#if VM_CHECK_MODE > 0
2044void
2045rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2046{
2047 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2048 for (int i=0; i<ccs->len; i++) {
2049 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2050 ccs->entries[i].flag,
2051 ccs->entries[i].argc);
2052 rp(ccs->entries[i].cc);
2053 }
2054}
2055
2056static int
2057vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2058{
2059 VM_ASSERT(vm_ccs_p(ccs));
2060 VM_ASSERT(ccs->len <= ccs->capa);
2061
2062 for (int i=0; i<ccs->len; i++) {
2063 const struct rb_callcache *cc = ccs->entries[i].cc;
2064
2065 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2066 VM_ASSERT(vm_cc_class_check(cc, klass));
2067 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2068 VM_ASSERT(!vm_cc_super_p(cc));
2069 VM_ASSERT(!vm_cc_refinement_p(cc));
2070 }
2071 return TRUE;
2072}
2073#endif
2074
2075const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2076
2077static const struct rb_callcache *
2078vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2079{
2080 const ID mid = vm_ci_mid(ci);
2081 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2082 struct rb_class_cc_entries *ccs = NULL;
2083 VALUE ccs_data;
2084
2085 if (cc_tbl) {
2086 // CCS data is keyed on method id, so we don't need the method id
2087 // for doing comparisons in the `for` loop below.
2088 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2089 ccs = (struct rb_class_cc_entries *)ccs_data;
2090 const int ccs_len = ccs->len;
2091
2092 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2093 rb_vm_ccs_free(ccs);
2094 rb_id_table_delete(cc_tbl, mid);
2095 ccs = NULL;
2096 }
2097 else {
2098 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2099
2100 // We already know the method id is correct because we had
2101 // to look up the ccs_data by method id. All we need to
2102 // compare is argc and flag
2103 unsigned int argc = vm_ci_argc(ci);
2104 unsigned int flag = vm_ci_flag(ci);
2105
2106 for (int i=0; i<ccs_len; i++) {
2107 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2108 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2109 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2110
2111 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2112
2113 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2114 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2115
2116 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2117 VM_ASSERT(ccs_cc->klass == klass);
2118 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2119
2120 return ccs_cc;
2121 }
2122 }
2123 }
2124 }
2125 }
2126 else {
2127 cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
2128 }
2129
2130 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2131
2132 const rb_callable_method_entry_t *cme;
2133
2134 if (ccs) {
2135 cme = ccs->cme;
2136 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2137
2138 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2139 }
2140 else {
2141 cme = rb_callable_method_entry(klass, mid);
2142 }
2143
2144 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2145
2146 if (cme == NULL) {
2147 // undef or not found: can't cache the information
2148 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2149 return &vm_empty_cc;
2150 }
2151
2152 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2153
2154 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2155
2156 if (ccs == NULL) {
2157 VM_ASSERT(cc_tbl != NULL);
2158
2159 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2160 // rb_callable_method_entry() prepares ccs.
2161 ccs = (struct rb_class_cc_entries *)ccs_data;
2162 }
2163 else {
2164 // TODO: required?
2165 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2166 }
2167 }
2168
2169 cme = rb_check_overloaded_cme(cme, ci);
2170
2171 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2172 vm_ccs_push(klass, ccs, ci, cc);
2173
2174 VM_ASSERT(vm_cc_cme(cc) != NULL);
2175 VM_ASSERT(cme->called_id == mid);
2176 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2177
2178 return cc;
2179}
2180
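/* Illustrative example (hypothetical call sites): both calls below resolve
 * the same cme, but their callinfos differ in argc, so each gets its own
 * cc under the single ccs bucket keyed by :center --
 *
 *   "x".center(10)      # argc:1
 *   "x".center(10, "-") # argc:2
 *
 * A later `.center(10)` on a String receiver can then reuse the first cc
 * through the (argc, flag) scan above instead of allocating a new one. */
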
2181const struct rb_callcache *
2182rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2183{
2184 const struct rb_callcache *cc;
2185
2186 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2187
2188 RB_VM_LOCK_ENTER();
2189 {
2190 cc = vm_search_cc(klass, ci);
2191
2192 VM_ASSERT(cc);
2193 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2194 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2195 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2196 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2197 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2198 }
2199 RB_VM_LOCK_LEAVE();
2200
2201 return cc;
2202}
2203
2204static const struct rb_callcache *
2205vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2206{
2207#if USE_DEBUG_COUNTER
2208 const struct rb_callcache *old_cc = cd->cc;
2209#endif
2210
2211 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2212
2213#if OPT_INLINE_METHOD_CACHE
2214 cd->cc = cc;
2215
2216 const struct rb_callcache *empty_cc = &vm_empty_cc;
2217 if (cd_owner && cc != empty_cc) {
2218 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2219 }
2220
2221#if USE_DEBUG_COUNTER
2222 if (!old_cc || old_cc == empty_cc) {
2223 // empty
2224 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2225 }
2226 else if (old_cc == cc) {
2227 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2228 }
2229 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2230 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2231 }
2232 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2233 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2234 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2235 }
2236 else {
2237 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2238 }
2239#endif
2240#endif // OPT_INLINE_METHOD_CACHE
2241
2242 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2243 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2244
2245 return cc;
2246}
2247
2248ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2249static const struct rb_callcache *
2250vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2251{
2252 const struct rb_callcache *cc = cd->cc;
2253
2254#if OPT_INLINE_METHOD_CACHE
2255 if (LIKELY(vm_cc_class_check(cc, klass))) {
2256 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2257 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2258 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2259 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2260 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2261 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2262
2263 return cc;
2264 }
2265 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2266 }
2267 else {
2268 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2269 }
2270#endif
2271
2272 return vm_search_method_slowpath0(cd_owner, cd, klass);
2273}
2274
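/* The fast path above only trusts cd->cc after two guards: the receiver's
 * class matches the cached class, and the cached cme has not been
 * invalidated (by method (re)definition, refinement activation, etc.).
 * For example, a monomorphic `obj.to_s` in a loop keeps hitting its cc
 * until to_s (say) is redefined on that class; the invalidation flag then
 * sends the next call through vm_search_method_slowpath0 for a fresh cc. */
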
2275static const struct rb_callcache *
2276vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2277{
2278 VALUE klass = CLASS_OF(recv);
2279 VM_ASSERT(klass != Qfalse);
2280 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2281
2282 return vm_search_method_fastpath(cd_owner, cd, klass);
2283}
2284
2285#if __has_attribute(transparent_union)
2286typedef union {
2287 VALUE (*anyargs)(ANYARGS);
2288 VALUE (*f00)(VALUE);
2289 VALUE (*f01)(VALUE, VALUE);
2290 VALUE (*f02)(VALUE, VALUE, VALUE);
2291 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2292 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2293 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2294 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2295 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2296 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2297 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2298 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2299 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2304 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2305} __attribute__((__transparent_union__)) cfunc_type;
2306# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2307#else
2308typedef VALUE (*cfunc_type)(ANYARGS);
2309# define make_cfunc_type(f) (cfunc_type)(f)
2310#endif
2311
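/* Why the transparent union: method bodies are stored as the erased type
 * VALUE (*)(ANYARGS), while callers of check_cfunc() pass concretely typed
 * functions such as rb_obj_equal (VALUE, VALUE). With the attribute, e.g.
 *
 *   check_cfunc(me, rb_obj_equal); // matches the f01 member, no cast
 *
 * type-checks without a function-pointer cast at every call site. Without
 * the attribute, make_cfunc_type() falls back to a plain cast. */
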
2312static inline int
2313check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2314{
2315 if (! me) {
2316 return false;
2317 }
2318 else {
2319 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2320 VM_ASSERT(callable_method_entry_p(me));
2321 VM_ASSERT(me->def);
2322 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2323 return false;
2324 }
2325 else {
2326#if __has_attribute(transparent_union)
2327 return me->def->body.cfunc.func == func.anyargs;
2328#else
2329 return me->def->body.cfunc.func == func;
2330#endif
2331 }
2332 }
2333}
2334
2335static inline int
2336check_method_basic_definition(const rb_callable_method_entry_t *me)
2337{
2338 return me && METHOD_ENTRY_BASIC(me);
2339}
2340
2341static inline int
2342vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2343{
2344 VM_ASSERT(iseq != NULL);
2345 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2346 return check_cfunc(vm_cc_cme(cc), func);
2347}
2348
2349#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2350#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2351
2352#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2353
2354static inline bool
2355FIXNUM_2_P(VALUE a, VALUE b)
2356{
2357 /* FIXNUM_P(a) && FIXNUM_P(b)
2358 * == ((a & 1) && (b & 1))
2359 * == a & b & 1 */
2360 SIGNED_VALUE x = a;
2361 SIGNED_VALUE y = b;
2362 SIGNED_VALUE z = x & y & 1;
2363 return z == 1;
2364}
2365
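/* Worked example: Fixnums are tagged (n << 1) | 1, so LONG2FIX(5) == 0b1011
 * and LONG2FIX(2) == 0b0101, giving 0b1011 & 0b0101 & 1 == 1. Heap object
 * pointers are aligned (even) and the immediates Qnil/Qfalse/Qtrue have
 * even encodings, so any non-Fixnum operand zeroes the low bit and the
 * test fails. */
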
2366static inline bool
2367FLONUM_2_P(VALUE a, VALUE b)
2368{
2369#if USE_FLONUM
2370 /* FLONUM_P(a) && FLONUM_P(b)
2371 * == ((a & 3) == 2) && ((b & 3) == 2)
2372 * == !(((a ^ 2) | (b ^ 2)) & 3)
2373 */
2374 SIGNED_VALUE x = a;
2375 SIGNED_VALUE y = b;
2376 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2377 return !z;
2378#else
2379 return false;
2380#endif
2381}
2382
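/* Worked example: a flonum carries the low tag bits 0b10, so x ^ 2 clears
 * those bits exactly when x is a flonum, and ((x ^ 2) | (y ^ 2)) & 3 == 0
 * only when both operands are flonums. On USE_FLONUM builds
 * FLONUM_2_P(DBL2NUM(1.0), DBL2NUM(2.0)) holds, while mixing in INT2FIX(1)
 * (tag 0b01) leaves a low bit set and the test fails. */
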
2383static VALUE
2384opt_equality_specialized(VALUE recv, VALUE obj)
2385{
2386 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2387 goto compare_by_identity;
2388 }
2389 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2390 goto compare_by_identity;
2391 }
2392 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2393 goto compare_by_identity;
2394 }
2395 else if (SPECIAL_CONST_P(recv)) {
2396 // other special constants: no fast path, fall through to Qundef
2397 }
2398 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2399 double a = RFLOAT_VALUE(recv);
2400 double b = RFLOAT_VALUE(obj);
2401
2402#if MSC_VERSION_BEFORE(1300)
2403 if (isnan(a)) {
2404 return Qfalse;
2405 }
2406 else if (isnan(b)) {
2407 return Qfalse;
2408 }
2409 else
2410#endif
2411 return RBOOL(a == b);
2412 }
2413 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2414 if (recv == obj) {
2415 return Qtrue;
2416 }
2417 else if (RB_TYPE_P(obj, T_STRING)) {
2418 return rb_str_eql_internal(obj, recv);
2419 }
2420 }
2421 return Qundef;
2422
2423 compare_by_identity:
2424 return RBOOL(recv == obj);
2425}
2426
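/* Ruby-level view of the dispatch above, assuming == is unredefined for
 * the relevant classes (hypothetical operands):
 *
 *   1 == 2       # both Fixnum: compare by identity
 *   1.0 == 2.0   # both flonum: compare by identity
 *   :a == :b     # both static Symbol: compare by identity
 *   heap_f == x  # heap-allocated Float receiver: raw double comparison
 *   "a" == "b"   # String receiver: rb_str_eql_internal
 *   obj == other # anything else: Qundef, caller takes the generic path
 */
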
2427static VALUE
2428opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2429{
2430 VM_ASSERT(cd_owner != NULL);
2431
2432 VALUE val = opt_equality_specialized(recv, obj);
2433 if (!UNDEF_P(val)) return val;
2434
2435 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2436 return Qundef;
2437 }
2438 else {
2439 return RBOOL(recv == obj);
2440 }
2441}
2442
2443#undef EQ_UNREDEFINED_P
2444
2445static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2446NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2447
2448static VALUE
2449opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2450{
2451 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2452
2453 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2454 return RBOOL(recv == obj);
2455 }
2456 else {
2457 return Qundef;
2458 }
2459}
2460
2461static VALUE
2462opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2463{
2464 VALUE val = opt_equality_specialized(recv, obj);
2465 if (!UNDEF_P(val)) {
2466 return val;
2467 }
2468 else {
2469 return opt_equality_by_mid_slowpath(recv, obj, mid);
2470 }
2471}
2472
2473VALUE
2474rb_equal_opt(VALUE obj1, VALUE obj2)
2475{
2476 return opt_equality_by_mid(obj1, obj2, idEq);
2477}
2478
2479VALUE
2480rb_eql_opt(VALUE obj1, VALUE obj2)
2481{
2482 return opt_equality_by_mid(obj1, obj2, idEqlP);
2483}
2484
2485extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2486extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2487
2488static VALUE
2489check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2490{
2491 switch (type) {
2492 case VM_CHECKMATCH_TYPE_WHEN:
2493 return pattern;
2494 case VM_CHECKMATCH_TYPE_RESCUE:
2495 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2496 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2497 }
2498 /* fall through */
2499 case VM_CHECKMATCH_TYPE_CASE: {
2500 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2501 }
2502 default:
2503 rb_bug("check_match: unreachable");
2504 }
2505}
2506
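/* The three match types in Ruby terms:
 *
 *   case x; when Integer then ...; end  # TYPE_CASE: Integer === x
 *   begin; ...; rescue Foo; end         # TYPE_RESCUE: Foo must be a
 *                                       # class/module, then Foo === exc
 *   case; when cond then ...; end       # TYPE_WHEN: the pattern itself is
 *                                       # returned and tested for truth
 *
 * The === dispatch goes through rb_vm_call_with_refinements, so refined
 * definitions of === are honored. */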
2507
2508#if MSC_VERSION_BEFORE(1300)
2509#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2510#else
2511#define CHECK_CMP_NAN(a, b) /* do nothing */
2512#endif
2513
2514static inline VALUE
2515double_cmp_lt(double a, double b)
2516{
2517 CHECK_CMP_NAN(a, b);
2518 return RBOOL(a < b);
2519}
2520
2521static inline VALUE
2522double_cmp_le(double a, double b)
2523{
2524 CHECK_CMP_NAN(a, b);
2525 return RBOOL(a <= b);
2526}
2527
2528static inline VALUE
2529double_cmp_gt(double a, double b)
2530{
2531 CHECK_CMP_NAN(a, b);
2532 return RBOOL(a > b);
2533}
2534
2535static inline VALUE
2536double_cmp_ge(double a, double b)
2537{
2538 CHECK_CMP_NAN(a, b);
2539 return RBOOL(a >= b);
2540}
2541
2542 // This logic is duplicated in vm_dump.c
2543static inline VALUE *
2544vm_base_ptr(const rb_control_frame_t *cfp)
2545{
2546 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2547
2548 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2549 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2550
2551 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2552 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2553 int params = ISEQ_BODY(cfp->iseq)->param.size;
2554
2555 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2556 bp += vm_ci_argc(ci);
2557 }
2558
2559 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2560 /* adjust `self' */
2561 bp += 1;
2562 }
2563#if VM_DEBUG_BP_CHECK
2564 if (bp != cfp->bp_check) {
2565 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2566 (long)(cfp->bp_check - GET_EC()->vm_stack),
2567 (long)(bp - GET_EC()->vm_stack));
2568 rb_bug("vm_base_ptr: unreachable");
2569 }
2570#endif
2571 return bp;
2572 }
2573 else {
2574 return NULL;
2575 }
2576}
2577
2578VALUE *
2579rb_vm_base_ptr(const rb_control_frame_t *cfp)
2580{
2581 return vm_base_ptr(cfp);
2582}
2583
2584/* method call processes with call_info */
2585
2586#include "vm_args.c"
2587
2588static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2589ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2590static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2591static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2592static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2593static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2594static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2595
2596static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2597
2598static VALUE
2599vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2600{
2601 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2602
2603 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2604}
2605
2606static VALUE
2607vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2608{
2609 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2610
2611 const struct rb_callcache *cc = calling->cc;
2612 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2613 int param = ISEQ_BODY(iseq)->param.size;
2614 int local = ISEQ_BODY(iseq)->local_table_size;
2615 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2616}
2617
2618bool
2619rb_simple_iseq_p(const rb_iseq_t *iseq)
2620{
2621 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2622 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2623 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2624 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2627 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2628 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2629}
2630
2631bool
2632rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2633{
2634 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2635 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2636 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2637 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2638 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2639 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2640 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2641 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2642}
2643
2644bool
2645rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2646{
2647 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2648 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2649 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2650 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2651 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2652 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2653 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2654}
2655
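/* Ruby-level examples of the three parameter shapes tested above
 * (hypothetical method names):
 *
 *   def simple(a, b); end        # rb_simple_iseq_p
 *   def optparam(a, b = 1); end  # rb_iseq_only_optparam_p
 *   def kwparam(a, k: 1); end    # rb_iseq_only_kwparam_p
 *
 * Methods with rest/post/kwrest/block parameters or `...` fail all three
 * and are handled by setup_parameters_complex() instead. */
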
2656#define ALLOW_HEAP_ARGV (-2)
2657#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2658
2659static inline bool
2660vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2661{
2662 vm_check_canary(GET_EC(), cfp->sp);
2663 bool ret = false;
2664
2665 if (!NIL_P(ary)) {
2666 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2667 long len = RARRAY_LEN(ary);
2668 int argc = calling->argc;
2669
2670 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2671 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2672 * a temporary array, instead of trying to keep arguments on the VM stack.
2673 */
2674 VALUE *argv = cfp->sp - argc;
2675 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2676 rb_ary_cat(argv_ary, argv, argc);
2677 rb_ary_cat(argv_ary, ptr, len);
2678 cfp->sp -= argc - 1;
2679 cfp->sp[-1] = argv_ary;
2680 calling->argc = 1;
2681 calling->heap_argv = argv_ary;
2682 RB_GC_GUARD(ary);
2683 }
2684 else {
2685 long i;
2686
2687 if (max_args >= 0 && len + argc > max_args) {
2688 /* If only a given max_args is allowed, copy up to max args.
2689 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2690 * where additional arguments are ignored.
2691 *
2692 * Also, copy up to one more argument than the maximum,
2693 * in case it is an empty keyword hash that will be removed.
2694 */
2695 calling->argc += len - (max_args - argc + 1);
2696 len = max_args - argc + 1;
2697 ret = true;
2698 }
2699 else {
2700 /* Unset heap_argv if it was set previously. This can happen
2701 * when forwarding modified arguments: heap_argv was used for
2702 * the original call, but the forwarded method does not support
2703 * heap_argv in all cases.
2704 */
2705 calling->heap_argv = 0;
2706 }
2707 CHECK_VM_STACK_OVERFLOW(cfp, len);
2708
2709 for (i = 0; i < len; i++) {
2710 *cfp->sp++ = ptr[i];
2711 }
2712 calling->argc += i;
2713 }
2714 }
2715
2716 return ret;
2717}
2718
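/* Example: a call like `f(*Array.new(100_000))` would need 100k VM stack
 * slots, so when max_args allows heap argv and the splat pushes the total
 * past VM_ARGC_STACK_MAX, the elements are parked in one hidden array
 * that occupies a single stack slot (argc becomes 1). A small
 * `f(*[1, 2, 3])` still expands in place on the stack. */
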
2719static inline void
2720vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2721{
2722 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2723 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2724 const VALUE h = rb_hash_new_with_size(kw_len);
2725 VALUE *sp = cfp->sp;
2726 int i;
2727
2728 for (i=0; i<kw_len; i++) {
2729 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2730 }
2731 (sp-kw_len)[0] = h;
2732
2733 cfp->sp -= kw_len - 1;
2734 calling->argc -= kw_len - 1;
2735 calling->kw_splat = 1;
2736}
2737
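/* Example: for `f(k1: 1, k2: 2)` the caller pushed the two values as
 * positional slots; this routine folds them into the single hash
 * {k1: 1, k2: 2} stored in the first of those slots, rewinds sp and argc
 * by kw_len - 1, and sets kw_splat so the callee sees one trailing
 * keyword hash. */
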
2738static inline VALUE
2739vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2740{
2741 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2742 if (keyword_hash != Qnil) {
2743 /* Convert a non-hash keyword splat to a new hash */
2744 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2745 }
2746 }
2747 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2748 /* Convert a hash keyword splat to a new hash unless
2749 * a mutable keyword splat was passed.
2750 * Skip allocating new hash for empty keyword splat, as empty
2751 * keyword splat will be ignored by both callers.
2752 */
2753 keyword_hash = rb_hash_dup(keyword_hash);
2754 }
2755 return keyword_hash;
2756}
2757
2758static inline void
2759CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2760 struct rb_calling_info *restrict calling,
2761 const struct rb_callinfo *restrict ci, int max_args)
2762{
2763 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2764 if (IS_ARGS_KW_SPLAT(ci)) {
2765 // f(*a, **kw)
2766 VM_ASSERT(calling->kw_splat == 1);
2767
2768 cfp->sp -= 2;
2769 calling->argc -= 2;
2770 VALUE ary = cfp->sp[0];
2771 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2772
2773 // splat a
2774 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2775
2776 // put kw
2777 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2778 if (UNLIKELY(calling->heap_argv)) {
2779 rb_ary_push(calling->heap_argv, kwh);
2780 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2781 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2782 calling->kw_splat = 0;
2783 }
2784 }
2785 else {
2786 cfp->sp[0] = kwh;
2787 cfp->sp++;
2788 calling->argc++;
2789
2790 VM_ASSERT(calling->kw_splat == 1);
2791 }
2792 }
2793 else {
2794 calling->kw_splat = 0;
2795 }
2796 }
2797 else {
2798 // f(*a)
2799 VM_ASSERT(calling->kw_splat == 0);
2800
2801 cfp->sp -= 1;
2802 calling->argc -= 1;
2803 VALUE ary = cfp->sp[0];
2804
2805 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2806 goto check_keyword;
2807 }
2808
2809 // check the last argument
2810 VALUE last_hash, argv_ary;
2811 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2812 if (!IS_ARGS_KEYWORD(ci) &&
2813 RARRAY_LEN(argv_ary) > 0 &&
2814 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2815 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2816
2817 rb_ary_pop(argv_ary);
2818 if (!RHASH_EMPTY_P(last_hash)) {
2819 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2820 calling->kw_splat = 1;
2821 }
2822 }
2823 }
2824 else {
2825check_keyword:
2826 if (!IS_ARGS_KEYWORD(ci) &&
2827 calling->argc > 0 &&
2828 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2829 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2830
2831 if (RHASH_EMPTY_P(last_hash)) {
2832 calling->argc--;
2833 cfp->sp -= 1;
2834 }
2835 else {
2836 cfp->sp[-1] = rb_hash_dup(last_hash);
2837 calling->kw_splat = 1;
2838 }
2839 }
2840 }
2841 }
2842 }
2843 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2844 // f(**kw)
2845 VM_ASSERT(calling->kw_splat == 1);
2846 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2847
2848 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2849 cfp->sp--;
2850 calling->argc--;
2851 calling->kw_splat = 0;
2852 }
2853 else {
2854 cfp->sp[-1] = kwh;
2855 }
2856 }
2857 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2858 // f(k1:1, k2:2)
2859 VM_ASSERT(calling->kw_splat == 0);
2860
2861 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2862 * by creating a keyword hash.
2863 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent with the stack layout.
2864 */
2865 vm_caller_setup_arg_kw(cfp, calling, ci);
2866 }
2867}
2868
2869#define USE_OPT_HIST 0
2870
2871#if USE_OPT_HIST
2872#define OPT_HIST_MAX 64
2873static int opt_hist[OPT_HIST_MAX+1];
2874
2875__attribute__((destructor))
2876static void
2877opt_hist_show_results_at_exit(void)
2878{
2879 for (int i=0; i<OPT_HIST_MAX; i++) {
2880 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2881 }
2882}
2883#endif
2884
2885static VALUE
2886vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2887 struct rb_calling_info *calling)
2888{
2889 const struct rb_callcache *cc = calling->cc;
2890 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2891 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2892 const int opt = calling->argc - lead_num;
2893 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2894 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2895 const int param = ISEQ_BODY(iseq)->param.size;
2896 const int local = ISEQ_BODY(iseq)->local_table_size;
2897 const int delta = opt_num - opt;
2898
2899 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2900
2901#if USE_OPT_HIST
2902 if (opt_pc < OPT_HIST_MAX) {
2903 opt_hist[opt]++;
2904 }
2905 else {
2906 opt_hist[OPT_HIST_MAX]++;
2907 }
2908#endif
2909
2910 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2911}
2912
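/* Worked example: for `def m(a, b = 1, c = 2)` lead_num is 1 and opt_num
 * is 2. A call m(10, 20) gives opt = 2 - 1 = 1, so opt_pc = opt_table[1],
 * the entry point that skips b's default (b received 20) and evaluates
 * only c's. delta = opt_num - opt = 1 trims param by the one slot that
 * c's default expression will fill. */
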
2913static VALUE
2914vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2915 struct rb_calling_info *calling)
2916{
2917 const struct rb_callcache *cc = calling->cc;
2918 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2919 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2920 const int opt = calling->argc - lead_num;
2921 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2922
2923 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2924
2925#if USE_OPT_HIST
2926 if (opt_pc < OPT_HIST_MAX) {
2927 opt_hist[opt]++;
2928 }
2929 else {
2930 opt_hist[OPT_HIST_MAX]++;
2931 }
2932#endif
2933
2934 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2935}
2936
2937static void
2938args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2939 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2940 VALUE *const locals);
2941
2942static VALUE
2943vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2944 struct rb_calling_info *calling)
2945{
2946 const struct rb_callcache *cc = calling->cc;
2947 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2948 int param_size = ISEQ_BODY(iseq)->param.size;
2949 int local_size = ISEQ_BODY(iseq)->local_table_size;
2950
2951 // Setting up local size and param size
2952 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2953
2954 local_size = local_size + vm_ci_argc(calling->cd->ci);
2955 param_size = param_size + vm_ci_argc(calling->cd->ci);
2956
2957 cfp->sp[0] = (VALUE)calling->cd->ci;
2958
2959 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2960}
2961
2962static VALUE
2963vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2964 struct rb_calling_info *calling)
2965{
2966 const struct rb_callinfo *ci = calling->cd->ci;
2967 const struct rb_callcache *cc = calling->cc;
2968
2969 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2970 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2971
2972 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2973 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2974 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2975 const int ci_kw_len = kw_arg->keyword_len;
2976 const VALUE * const ci_keywords = kw_arg->keywords;
2977 VALUE *argv = cfp->sp - calling->argc;
2978 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2979 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2980 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2981 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2982 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2983
2984 int param = ISEQ_BODY(iseq)->param.size;
2985 int local = ISEQ_BODY(iseq)->local_table_size;
2986 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2987}
2988
2989static VALUE
2990vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2991 struct rb_calling_info *calling)
2992{
2993 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2994 const struct rb_callcache *cc = calling->cc;
2995
2996 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2997 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2998
2999 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3000 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3001 VALUE * const argv = cfp->sp - calling->argc;
3002 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3003
3004 int i;
3005 for (i=0; i<kw_param->num; i++) {
3006 klocals[i] = kw_param->default_values[i];
3007 }
3008 klocals[i] = INT2FIX(0); // kw specify flag
3009 // NOTE:
3010 // nobody checks this value, but it should be cleared because it can
3011 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3012
3013 int param = ISEQ_BODY(iseq)->param.size;
3014 int local = ISEQ_BODY(iseq)->local_table_size;
3015 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3016}
3017
3018static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3019
3020static VALUE
3021vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3022 struct rb_calling_info *calling)
3023{
3024 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3025 cfp->sp -= (calling->argc + 1);
3026 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3027 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3028}
3029
3030VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3031
3032static void
3033warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3034{
3035 rb_vm_t *vm = GET_VM();
3036 st_table *dup_check_table = vm->unused_block_warning_table;
3037 st_data_t key;
3038 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3039
3040 union {
3041 VALUE v;
3042 unsigned char b[SIZEOF_VALUE];
3043 } k1 = {
3044 .v = (VALUE)pc,
3045 }, k2 = {
3046 .v = (VALUE)cme->def,
3047 };
3048
3049 // relaxed check: key on the method id only
3050 if (!strict_unused_block) {
3051 key = (st_data_t)cme->def->original_id;
3052
3053 if (st_lookup(dup_check_table, key, NULL)) {
3054 return;
3055 }
3056 }
3057
3058 // strict check
3059 // make a unique key from the pc and the me->def pointer
3060 key = 0;
3061 for (int i=0; i<SIZEOF_VALUE; i++) {
3062 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3063 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3064 }
3065
3066 if (0) {
3067 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3068 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3069 fprintf(stderr, "key:%p\n", (void *)key);
3070 }
3071
3072 // duplication check
3073 if (st_insert(dup_check_table, key, 1)) {
3074 // already shown
3075 }
3076 else if (RTEST(ruby_verbose) || strict_unused_block) {
3077 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3078 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3079
3080 if (!NIL_P(m_loc)) {
3081 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3082 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3083 }
3084 else {
3085 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3086 }
3087 }
3088}
3089
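/* Key construction sketch: on a 64-bit build the strict key XORs byte i of
 * the pc with byte (7 - i) of the cme->def pointer, mixing the low,
 * frequently varying bytes of one pointer with the high bytes of the
 * other, which presumably keeps similar pointers from cancelling out.
 * The table then deduplicates the warning per (pc, def) pair. */
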
3090static inline int
3091vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3092 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3093{
3094 const struct rb_callinfo *ci = calling->cd->ci;
3095 const struct rb_callcache *cc = calling->cc;
3096
3097 VM_ASSERT((vm_ci_argc(ci), 1));
3098 VM_ASSERT(vm_cc_cme(cc) != NULL);
3099
3100 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3101 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3102 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3103 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3104 }
3105
3106 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3107 if (LIKELY(rb_simple_iseq_p(iseq))) {
3108 rb_control_frame_t *cfp = ec->cfp;
3109 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3110 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3111
3112 if (calling->argc != lead_num) {
3113 argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3114 }
3115
3116 //VM_ASSERT(ci == calling->cd->ci);
3117 VM_ASSERT(cc == calling->cc);
3118
3119 if (vm_call_iseq_optimizable_p(ci, cc)) {
3120 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3121 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3122 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3123 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3124 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3125 }
3126 else {
3127 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3128 }
3129 }
3130 return 0;
3131 }
3132 else if (rb_iseq_only_optparam_p(iseq)) {
3133 rb_control_frame_t *cfp = ec->cfp;
3134
3135 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3136 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3137
3138 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3139 const int argc = calling->argc;
3140 const int opt = argc - lead_num;
3141
3142 if (opt < 0 || opt > opt_num) {
3143 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3144 }
3145
3146 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3147 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3148 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3149 vm_call_cacheable(ci, cc));
3150 }
3151 else {
3152 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3153 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3154 vm_call_cacheable(ci, cc));
3155 }
3156
3157 /* initialize opt vars for self-references */
3158 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3159 for (int i=argc; i<lead_num + opt_num; i++) {
3160 argv[i] = Qnil;
3161 }
3162 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3163 }
3164 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3165 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3166 const int argc = calling->argc;
3167 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3168
3169 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3170 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3171
3172 if (argc - kw_arg->keyword_len == lead_num) {
3173 const int ci_kw_len = kw_arg->keyword_len;
3174 const VALUE * const ci_keywords = kw_arg->keywords;
3175 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3176 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3177
3178 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3179 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3180
3181 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3182 vm_call_cacheable(ci, cc));
3183
3184 return 0;
3185 }
3186 }
3187 else if (argc == lead_num) {
3188 /* no kwarg */
3189 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3190 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3191
3192 if (klocals[kw_param->num] == INT2FIX(0)) {
3193 /* copy from default_values */
3194 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3195 vm_call_cacheable(ci, cc));
3196 }
3197
3198 return 0;
3199 }
3200 }
3201 }
3202
3203 // Called iseq is using ... param
3204 // def foo(...) # <- iseq for foo will have "forwardable"
3205 //
3206 // We want to set the `...` local to the caller's CI
3207 // foo(1, 2) # <- the ci for this should end up as `...`
3208 //
3209 // So hopefully the stack looks like:
3210 //
3211 // => 1
3212 // => 2
3213 // => *
3214 // => **
3215 // => &
3216 // => ... # <- points at `foo`s CI
3217 // => cref_or_me
3218 // => specval
3219 // => type
3220 //
3221 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3222 bool can_fastpath = true;
3223
3224 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3225 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3226 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3227 ci = vm_ci_new_runtime(
3228 vm_ci_mid(ci),
3229 vm_ci_flag(ci),
3230 vm_ci_argc(ci),
3231 vm_ci_kwarg(ci));
3232 }
3233 else {
3234 ci = forward_cd->caller_ci;
3235 }
3236 can_fastpath = false;
3237 }
3238 // C functions calling iseqs will stack allocate a CI,
3239 // so we need to convert it to a heap-allocated one
3240 if (!vm_ci_markable(ci)) {
3241 ci = vm_ci_new_runtime(
3242 vm_ci_mid(ci),
3243 vm_ci_flag(ci),
3244 vm_ci_argc(ci),
3245 vm_ci_kwarg(ci));
3246 can_fastpath = false;
3247 }
3248 argv[param_size - 1] = (VALUE)ci;
3249 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3250 return 0;
3251 }
3252
3253 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3254}
3255
3256static void
3257vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3258{
3259 // This case is when the caller is using a ... parameter.
3260 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3261 // In this case the caller's caller's CI will be on the stack.
3262 //
3263 // For example:
3264 //
3265 // def bar(a, b); a + b; end
3266 // def foo(...); bar(...); end
3267 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3268 //
3269 // Stack layout will be:
3270 //
3271 // > 1
3272 // > 2
3273 // > CI for foo(1, 2)
3274 // > cref_or_me
3275 // > specval
3276 // > type
3277 // > receiver
3278 // > CI for foo(1, 2), via `getlocal ...`
3279 // > ( SP points here )
3280 const VALUE * lep = VM_CF_LEP(cfp);
3281
3282 const rb_iseq_t *iseq;
3283
3284 // If we're in an escaped environment (lambda for example), get the iseq
3285 // from the captured env.
3286 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3287 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3288 iseq = env->iseq;
3289 }
3290 else { // Otherwise use the lep to find the caller
3291 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3292 }
3293
3294 // Our local storage is below the args we need to copy
3295 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3296
3297 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3298 VALUE * to = cfp->sp - 1; // clobber the CI
3299
3300 if (RTEST(splat)) {
3301 to -= 1; // clobber the splat array
3302 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3303 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3304 to += RARRAY_LEN(splat);
3305 }
3306
3307 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3308 MEMCPY(to, from, VALUE, argc);
3309 cfp->sp = to + argc;
3310
3311 // Stack layout should now be:
3312 //
3313 // > 1
3314 // > 2
3315 // > CI for foo(1, 2)
3316 // > cref_or_me
3317 // > specval
3318 // > type
3319 // > receiver
3320 // > 1
3321 // > 2
3322 // > ( SP points here )
3323}
3324
3325static VALUE
3326vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3327{
3328 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3329
3330 const struct rb_callcache *cc = calling->cc;
3331 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3332 int param_size = ISEQ_BODY(iseq)->param.size;
3333 int local_size = ISEQ_BODY(iseq)->local_table_size;
3334
3335 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3336
3337 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3338 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3339}
3340
3341static VALUE
3342vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3343{
3344 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3345
3346 const struct rb_callcache *cc = calling->cc;
3347 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3348 int param_size = ISEQ_BODY(iseq)->param.size;
3349 int local_size = ISEQ_BODY(iseq)->local_table_size;
3350
3351 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3352
3353 // Setting up local size and param size
3354 local_size = local_size + vm_ci_argc(calling->cd->ci);
3355 param_size = param_size + vm_ci_argc(calling->cd->ci);
3356
3357 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3358 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3359}
3360
3361static inline VALUE
3362vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3363 int opt_pc, int param_size, int local_size)
3364{
3365 const struct rb_callinfo *ci = calling->cd->ci;
3366 const struct rb_callcache *cc = calling->cc;
3367
3368 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3369 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3370 }
3371 else {
3372 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3373 }
3374}
3375
3376static inline VALUE
3377vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3378 int opt_pc, int param_size, int local_size)
3379{
3380 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3381 VALUE *argv = cfp->sp - calling->argc;
3382 VALUE *sp = argv + param_size;
3383 cfp->sp = argv - 1 /* recv */;
3384
3385 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3386 calling->block_handler, (VALUE)me,
3387 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3388 local_size - param_size,
3389 ISEQ_BODY(iseq)->stack_max);
3390 return Qundef;
3391}
3392
3393static inline VALUE
3394vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3395{
3396 const struct rb_callcache *cc = calling->cc;
3397 unsigned int i;
3398 VALUE *argv = cfp->sp - calling->argc;
3399 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3400 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3401 VALUE *src_argv = argv;
3402 VALUE *sp_orig, *sp;
3403 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3404
3405 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3406 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3407 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3408 dst_captured->code.val = src_captured->code.val;
3409 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3410 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3411 }
3412 else {
3413 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3414 }
3415 }
3416
3417 vm_pop_frame(ec, cfp, cfp->ep);
3418 cfp = ec->cfp;
3419
3420 sp_orig = sp = cfp->sp;
3421
3422 /* push self */
3423 sp[0] = calling->recv;
3424 sp++;
3425
3426 /* copy arguments */
3427 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3428 *sp++ = src_argv[i];
3429 }
3430
3431 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3432 calling->recv, calling->block_handler, (VALUE)me,
3433 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3434 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3435 ISEQ_BODY(iseq)->stack_max);
3436
3437 cfp->sp = sp_orig;
3438
3439 return Qundef;
3440}
3441
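/* Example: when an iseq is compiled with the tailcall_optimization compile
 * option of RubyVM::InstructionSequence, a self-recursive tail call such as
 *
 *   def countdown(n); n == 0 ? :done : countdown(n - 1); end
 *
 * pops the caller's frame above before pushing the callee's, so deep
 * recursion runs in O(1) VM stack depth instead of overflowing. */
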
3442static void
3443ractor_unsafe_check(void)
3444{
3445 if (!rb_ractor_main_p()) {
3446 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3447 }
3448}
3449
3450static VALUE
3451call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3452{
3453 ractor_unsafe_check();
3454 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3455 return (*f)(recv, rb_ary_new4(argc, argv));
3456}
3457
3458static VALUE
3459call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3460{
3461 ractor_unsafe_check();
3462 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3463 return (*f)(argc, argv, recv);
3464}
3465
3466static VALUE
3467call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3468{
3469 ractor_unsafe_check();
3470 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3471 return (*f)(recv);
3472}
3473
3474static VALUE
3475call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3476{
3477 ractor_unsafe_check();
3478 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3479 return (*f)(recv, argv[0]);
3480}
3481
3482static VALUE
3483call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3484{
3485 ractor_unsafe_check();
3486 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3487 return (*f)(recv, argv[0], argv[1]);
3488}
3489
3490static VALUE
3491call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3492{
3493 ractor_unsafe_check();
3494 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3495 return (*f)(recv, argv[0], argv[1], argv[2]);
3496}
3497
3498static VALUE
3499call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3500{
3501 ractor_unsafe_check();
3502 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3503 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3504}
3505
3506static VALUE
3507call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3508{
3509 ractor_unsafe_check();
3510 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3511 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3512}
3513
3514static VALUE
3515call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3516{
3517 ractor_unsafe_check();
3518 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3519 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3520}
3521
3522static VALUE
3523call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3524{
3525 ractor_unsafe_check();
3526 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3527 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3528}
3529
3530static VALUE
3531call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3532{
3533 ractor_unsafe_check();
3534 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3535 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3536}
3537
3538static VALUE
3539call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3540{
3541 ractor_unsafe_check();
3542 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3543 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3544}
3545
3546static VALUE
3547call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3548{
3549 ractor_unsafe_check();
3550 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3551 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3552}
3553
3554static VALUE
3555call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3556{
3557 ractor_unsafe_check();
3558 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3559 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3560}
3561
3562static VALUE
3563call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3564{
3565 ractor_unsafe_check();
3566 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3567 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3568}
3569
3570static VALUE
3571call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3572{
3573 ractor_unsafe_check();
3574 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3575 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3576}
3577
3578static VALUE
3579call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3580{
3581 ractor_unsafe_check();
3582 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3583 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3584}
3585
3586static VALUE
3587call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3588{
3589 ractor_unsafe_check();
3590 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3591 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3592}
3593
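/* These fixed-arity trampolines are selected when the C method is
 * registered; e.g. a method defined with rb_define_method(klass, "add",
 * add, 2) (hypothetical names) is invoked through call_cfunc_2. Arity -1
 * maps to the (argc, argv, recv) convention of call_cfunc_m1, and arity
 * -2 to call_cfunc_m2, which packs the arguments into one Array. The
 * ractor_safe_* variants below are identical except for skipping the
 * main-ractor guard. */
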
3594static VALUE
3595ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3596{
3597 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3598 return (*f)(recv, rb_ary_new4(argc, argv));
3599}
3600
3601static VALUE
3602ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3603{
3604 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3605 return (*f)(argc, argv, recv);
3606}
3607
3608static VALUE
3609ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3610{
3611 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3612 return (*f)(recv);
3613}
3614
3615static VALUE
3616ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3617{
3618 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3619 return (*f)(recv, argv[0]);
3620}
3621
3622static VALUE
3623ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3624{
3625 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3626 return (*f)(recv, argv[0], argv[1]);
3627}
3628
3629static VALUE
3630ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3631{
3632 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3633 return (*f)(recv, argv[0], argv[1], argv[2]);
3634}
3635
3636static VALUE
3637ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3638{
3639 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3640 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3641}
3642
3643static VALUE
3644ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3645{
3646 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3647 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3648}
3649
3650static VALUE
3651ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3652{
3653 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3654 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3655}
3656
3657static VALUE
3658ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3659{
3660 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3661 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3662}
3663
3664static VALUE
3665ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3666{
3667 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3668 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3669}
3670
3671static VALUE
3672ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3673{
3674 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3675 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3676}
3677
3678static VALUE
3679ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3680{
3681 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3682 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3683}
3684
3685static VALUE
3686ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3687{
3688 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3689 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3690}
3691
3692static VALUE
3693ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3694{
3695 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3696 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3697}
3698
3699static VALUE
3700ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3701{
3702 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3703 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3704}
3705
3706static VALUE
3707ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3708{
3709 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3710 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3711}
3712
3713static VALUE
3714ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3715{
3716 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3717 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3718}
3719
3720static inline int
3721vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3722{
3723 const int ov_flags = RAISED_STACKOVERFLOW;
3724 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3725 if (rb_ec_raised_p(ec, ov_flags)) {
3726 rb_ec_raised_reset(ec, ov_flags);
3727 return TRUE;
3728 }
3729 return FALSE;
3730}
3731
3732#define CHECK_CFP_CONSISTENCY(func) \
3733 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3734 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3735
3736static inline
3737const rb_method_cfunc_t *
3738vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3739{
3740#if VM_DEBUG_VERIFY_METHOD_CACHE
3741 switch (me->def->type) {
3742 case VM_METHOD_TYPE_CFUNC:
3743 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3744 break;
3745# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3746 METHOD_BUG(ISEQ);
3747 METHOD_BUG(ATTRSET);
3748 METHOD_BUG(IVAR);
3749 METHOD_BUG(BMETHOD);
3750 METHOD_BUG(ZSUPER);
3751 METHOD_BUG(UNDEF);
3752 METHOD_BUG(OPTIMIZED);
3753 METHOD_BUG(MISSING);
3754 METHOD_BUG(REFINED);
3755 METHOD_BUG(ALIAS);
3756# undef METHOD_BUG
3757 default:
3758 rb_bug("wrong method type: %d", me->def->type);
3759 }
3760#endif
3761 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3762}
3763
3764static VALUE
3765vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3766 int argc, VALUE *argv, VALUE *stack_bottom)
3767{
3768 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3769 const struct rb_callinfo *ci = calling->cd->ci;
3770 const struct rb_callcache *cc = calling->cc;
3771 VALUE val;
3772 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3773 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3774
3775 VALUE recv = calling->recv;
3776 VALUE block_handler = calling->block_handler;
3777 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3778
3779 if (UNLIKELY(calling->kw_splat)) {
3780 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3781 }
3782
3783 VM_ASSERT(reg_cfp == ec->cfp);
3784
3785 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3786 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3787
3788 vm_push_frame(ec, NULL, frame_type, recv,
3789 block_handler, (VALUE)me,
3790 0, ec->cfp->sp, 0, 0);
3791
3792 int len = cfunc->argc;
3793 if (len >= 0) rb_check_arity(argc, len, len);
3794
3795 reg_cfp->sp = stack_bottom;
3796 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3797
3798 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3799
3800 rb_vm_pop_frame(ec);
3801
3802 VM_ASSERT(ec->cfp->sp == stack_bottom);
3803
3804 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3805 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3806
3807 return val;
3808}
3809
3810// Push a C method frame for a given cme. This is called when JIT code skipped
3811// pushing a frame but the C method reached a point where a frame is needed.
3812void
3813rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3814{
3815 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3816 rb_execution_context_t *ec = GET_EC();
3817 VALUE *sp = ec->cfp->sp;
3818 VALUE recv = *(sp - recv_idx - 1);
3819 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3820 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3821#if VM_CHECK_MODE > 0
3822 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3823 *(GET_EC()->cfp->sp) = Qfalse;
3824#endif
3825 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3826}
3827
3828// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3829bool
3830rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3831{
3832 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3833}
3834
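/* Plain cfunc call shape: argc arguments lie contiguously on the VM stack
 * with the receiver just below them. */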
3835static VALUE
3836vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3837{
3838 int argc = calling->argc;
3839 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3840 VALUE *argv = &stack_bottom[1];
3841
3842 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3843}
3844
3845static VALUE
3846vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3847{
3848 const struct rb_callinfo *ci = calling->cd->ci;
3849 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3850
3851 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3852 VALUE argv_ary;
3853 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3854 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3855 int argc = RARRAY_LENINT(argv_ary);
3856 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3857 VALUE *stack_bottom = reg_cfp->sp - 2;
3858
3859 VM_ASSERT(calling->argc == 1);
3860 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3861 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3862
3863 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3864 }
3865 else {
3866 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3867
3868 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3869 }
3870}
3871
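/* A splatted argument array is expanded directly onto the VM stack when its
 * length fits in VM_ARGC_STACK_MAX; larger splats fall back to the heap-argv
 * path in vm_call_cfunc_other. */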
3872static inline VALUE
3873vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3874{
3875 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3876 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3877
3878 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3879 return vm_call_cfunc_other(ec, reg_cfp, calling);
3880 }
3881
3882 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3883 calling->kw_splat = 0;
3884 int i;
3885 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3886 VALUE *sp = stack_bottom;
3887 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3888 for (i = 0; i < argc; i++) {
3889 *++sp = argv[i];
3890 }
3891 reg_cfp->sp = sp+1;
3892
3893 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3894}
3895
3896static inline VALUE
3897vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3898{
3899 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3900 VALUE argv_ary = reg_cfp->sp[-1];
3901 int argc = RARRAY_LENINT(argv_ary);
3902 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3903 VALUE last_hash;
3904 int argc_offset = 0;
3905
3906 if (UNLIKELY(argc > 0 &&
3907 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3908 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3909 if (!RHASH_EMPTY_P(last_hash)) {
3910 return vm_call_cfunc_other(ec, reg_cfp, calling);
3911 }
3912 argc_offset++;
3913 }
3914 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3915}
3916
3917static inline VALUE
3918vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3919{
3920 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3921 VALUE keyword_hash = reg_cfp->sp[-1];
3922
3923 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3924 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3925 }
3926
3927 return vm_call_cfunc_other(ec, reg_cfp, calling);
3928}
3929
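/*
 * Entry point for cfunc calls: inspects the call shape recorded in the
 * callinfo and installs a specialized fastpath, so that a bare f(*a) or
 * f(*a, **kw) can keep its arguments inside the splatted array.
 */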
3930static VALUE
3931vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3932{
3933 const struct rb_callinfo *ci = calling->cd->ci;
3934 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3935
3936 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3937 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3938 // f(*a)
3939 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3940 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3941 }
3942 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3943 // f(*a, **kw)
3944 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3945 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3946 }
3947 }
3948
3949 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3950 return vm_call_cfunc_other(ec, reg_cfp, calling);
3951}
3952
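/* attr_reader fastpath: pops the receiver and reads the ivar through the
 * index cached in the callcache. */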
3953static VALUE
3954vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3955{
3956 const struct rb_callcache *cc = calling->cc;
3957 RB_DEBUG_COUNTER_INC(ccf_ivar);
3958 cfp->sp -= 1;
3959 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3960 return ivar;
3961}
3962
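/*
 * attr_writer fastpath: pops the value and receiver and writes the ivar
 * using the shape id and index cached in the callcache. Receivers other
 * than T_OBJECT/T_CLASS/T_MODULE try the generic ivar storage first, and a
 * cache miss falls back to vm_setivar_slowpath_attr.
 */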
3963static VALUE
3964vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3965{
3966 RB_DEBUG_COUNTER_INC(ccf_attrset);
3967 VALUE val = *(cfp->sp - 1);
3968 cfp->sp -= 2;
3969 attr_index_t index = vm_cc_attr_index(cc);
3970 shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3971 ID id = vm_cc_cme(cc)->def->body.attr.id;
3972 rb_check_frozen(obj);
3973 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3974 if (UNDEF_P(res)) {
3975 switch (BUILTIN_TYPE(obj)) {
3976 case T_OBJECT:
3977 case T_CLASS:
3978 case T_MODULE:
3979 break;
3980 default:
3981 {
3982 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3983 if (!UNDEF_P(res)) {
3984 return res;
3985 }
3986 }
3987 }
3988 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3989 }
3990 return res;
3991}
3992
3993static VALUE
3994vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3995{
3996 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3997}
3998
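/*
 * bmethods are methods defined from a Proc (define_method). The Proc's
 * defining Ractor is checked first: calling a bmethod built from an
 * unshareable Proc from a different Ractor raises.
 */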
3999static inline VALUE
4000vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4001{
4002 rb_proc_t *proc;
4003 VALUE val;
4004 const struct rb_callcache *cc = calling->cc;
4005 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4006 VALUE procv = cme->def->body.bmethod.proc;
4007
4008 if (!RB_OBJ_SHAREABLE_P(procv) &&
4009 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4010 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4011 }
4012
4013 /* control block frame */
4014 GetProcPtr(procv, proc);
4015 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4016
4017 return val;
4018}
4019
4020static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4021
4022static VALUE
4023vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4024{
4025 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4026
4027 const struct rb_callcache *cc = calling->cc;
4028 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4029 VALUE procv = cme->def->body.bmethod.proc;
4030
4031 if (!RB_OBJ_SHAREABLE_P(procv) &&
4032 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4033 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4034 }
4035
4036 rb_proc_t *proc;
4037 GetProcPtr(procv, proc);
4038 const struct rb_block *block = &proc->block;
4039
4040 while (vm_block_type(block) == block_type_proc) {
4041 block = vm_proc_block(block->as.proc);
4042 }
4043 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4044
4045 const struct rb_captured_block *captured = &block->as.captured;
4046 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4047 VALUE * const argv = cfp->sp - calling->argc;
4048 const int arg_size = ISEQ_BODY(iseq)->param.size;
4049
4050 int opt_pc;
4051 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4052 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4053 }
4054 else {
4055 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4056 }
4057
4058 cfp->sp = argv - 1; // -1 for the receiver
4059
4060 vm_push_frame(ec, iseq,
4061 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4062 calling->recv,
4063 VM_GUARDED_PREV_EP(captured->ep),
4064 (VALUE)cme,
4065 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4066 argv + arg_size,
4067 ISEQ_BODY(iseq)->local_table_size - arg_size,
4068 ISEQ_BODY(iseq)->stack_max);
4069
4070 return Qundef;
4071}
4072
4073static VALUE
4074vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4075{
4076 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4077
4078 VALUE *argv;
4079 int argc;
4080 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4081 if (UNLIKELY(calling->heap_argv)) {
4082 argv = RARRAY_PTR(calling->heap_argv);
4083 cfp->sp -= 2;
4084 }
4085 else {
4086 argc = calling->argc;
4087 argv = ALLOCA_N(VALUE, argc);
4088 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4089 cfp->sp += - argc - 1;
4090 }
4091
4092 return vm_call_bmethod_body(ec, calling, argv);
4093}
4094
4095static VALUE
4096vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4097{
4098 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4099
4100 const struct rb_callcache *cc = calling->cc;
4101 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4102 VALUE procv = cme->def->body.bmethod.proc;
4103 rb_proc_t *proc;
4104 GetProcPtr(procv, proc);
4105 const struct rb_block *block = &proc->block;
4106
4107 while (vm_block_type(block) == block_type_proc) {
4108 block = vm_proc_block(block->as.proc);
4109 }
4110 if (vm_block_type(block) == block_type_iseq) {
4111 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4112 return vm_call_iseq_bmethod(ec, cfp, calling);
4113 }
4114
4115 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4116 return vm_call_noniseq_bmethod(ec, cfp, calling);
4117}
4118
4119VALUE
4120rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4121{
4122 VALUE klass = current_class;
4123
4124 /* if current_class is the origin iclass of a prepended Module, start from the class that owns it */
4125 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
4126 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4127 klass = RBASIC_CLASS(klass);
4128 }
4129
4130 while (RTEST(klass)) {
4131 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4132 if (owner == target_owner) {
4133 return klass;
4134 }
4135 klass = RCLASS_SUPER(klass);
4136 }
4137
4138 return current_class; /* maybe module function */
4139}
4140
4141static const rb_callable_method_entry_t *
4142aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4143{
4144 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4145 const rb_callable_method_entry_t *cme;
4146
4147 if (orig_me->defined_class == 0) {
4148 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4149 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4150 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4151
4152 if (me->def->reference_count == 1) {
4153 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4154 }
4155 else {
4156 rb_method_definition_t *def =
4157 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4158 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4159 }
4160 }
4161 else {
4162 cme = (const rb_callable_method_entry_t *)orig_me;
4163 }
4164
4165 VM_ASSERT(callable_method_entry_p(cme));
4166 return cme;
4167}
4168
4169const rb_callable_method_entry_t *
4170rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4171{
4172 return aliased_callable_method_entry(me);
4173}
4174
4175static VALUE
4176vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4177{
4178 calling->cc = &VM_CC_ON_STACK(Qundef,
4179 vm_call_general,
4180 {{0}},
4181 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4182
4183 return vm_call_method_each_type(ec, cfp, calling);
4184}
4185
4186static enum method_missing_reason
4187ci_missing_reason(const struct rb_callinfo *ci)
4188{
4189 enum method_missing_reason stat = MISSING_NOENTRY;
4190 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4191 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4192 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4193 return stat;
4194}
4195
4196static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4197
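/*
 * Dispatches a call whose method name is given as a Symbol (or String), as
 * used by send/__send__ and Symbol#to_proc. If the name has no existing ID,
 * the call is rewritten into method_missing; when method_missing is not
 * overridden, the NoMethodError is raised directly so that no Symbol is
 * created for the unknown name ([Feature #5112]).
 */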
4198static VALUE
4199vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4200 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4201{
4202 ASSUME(calling->argc >= 0);
4203
4204 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4205 int argc = calling->argc;
4206 VALUE recv = calling->recv;
4207 VALUE klass = CLASS_OF(recv);
4208 ID mid = rb_check_id(&symbol);
4209 flags |= VM_CALL_OPT_SEND;
4210
4211 if (UNLIKELY(! mid)) {
4212 mid = idMethodMissing;
4213 missing_reason = ci_missing_reason(ci);
4214 ec->method_missing_reason = missing_reason;
4215
4216 VALUE argv_ary;
4217 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4218 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4219 rb_ary_unshift(argv_ary, symbol);
4220
4221 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4222 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4223 VALUE exc = rb_make_no_method_exception(
4224 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4225
4226 rb_exc_raise(exc);
4227 }
4228 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4229 }
4230 else {
4231 /* E.g. when argc == 2
4232 *
4233 * | | | | TOPN
4234 * | | +------+
4235 * | | +---> | arg1 | 0
4236 * +------+ | +------+
4237 * | arg1 | -+ +-> | arg0 | 1
4238 * +------+ | +------+
4239 * | arg0 | ---+ | sym | 2
4240 * +------+ +------+
4241 * | recv | | recv | 3
4242 * --+------+--------+------+------
4243 */
4244 int i = argc;
4245 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4246 INC_SP(1);
4247 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4248 argc = ++calling->argc;
4249
4250 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4251 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4252 TOPN(i) = symbol;
4253 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4254 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4255 VALUE exc = rb_make_no_method_exception(
4256 rb_eNoMethodError, 0, recv, argc, argv, priv);
4257
4258 rb_exc_raise(exc);
4259 }
4260 else {
4261 TOPN(i) = rb_str_intern(symbol);
4262 }
4263 }
4264 }
4265
4266 struct rb_forwarding_call_data new_fcd = {
4267 .cd = {
4268 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4269 .cc = NULL,
4270 },
4271 .caller_ci = NULL,
4272 };
4273
4274 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4275 calling->cd = &new_fcd.cd;
4276 }
4277 else {
4278 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4279 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4280 new_fcd.caller_ci = caller_ci;
4281 calling->cd = (struct rb_call_data *)&new_fcd;
4282 }
4283 calling->cc = &VM_CC_ON_STACK(klass,
4284 vm_call_general,
4285 { .method_missing_reason = missing_reason },
4286 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4287
4288 if (flags & VM_CALL_FCALL) {
4289 return vm_call_method(ec, reg_cfp, calling);
4290 }
4291
4292 const struct rb_callcache *cc = calling->cc;
4293 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4294
4295 if (vm_cc_cme(cc) != NULL) {
4296 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4297 case METHOD_VISI_PUBLIC: /* likely */
4298 return vm_call_method_each_type(ec, reg_cfp, calling);
4299 case METHOD_VISI_PRIVATE:
4300 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4301 break;
4302 case METHOD_VISI_PROTECTED:
4303 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4304 break;
4305 default:
4306 VM_UNREACHABLE(vm_call_method);
4307 }
4308 return vm_call_method_missing(ec, reg_cfp, calling);
4309 }
4310
4311 return vm_call_method_nome(ec, reg_cfp, calling);
4312}
4313
4314static VALUE
4315vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4316{
4317 const struct rb_callinfo *ci = calling->cd->ci;
4318 int i;
4319 VALUE sym;
4320
4321 i = calling->argc - 1;
4322
4323 if (calling->argc == 0) {
4324 rb_raise(rb_eArgError, "no method name given");
4325 }
4326
4327 sym = TOPN(i);
4328 /* E.g. when i == 2
4329 *
4330 * | | | | TOPN
4331 * +------+ | |
4332 * | arg1 | ---+ | | 0
4333 * +------+ | +------+
4334 * | arg0 | -+ +-> | arg1 | 1
4335 * +------+ | +------+
4336 * | sym | +---> | arg0 | 2
4337 * +------+ +------+
4338 * | recv | | recv | 3
4339 * --+------+--------+------+------
4340 */
4341 /* shift arguments */
4342 if (i > 0) {
4343 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4344 }
4345 calling->argc -= 1;
4346 DEC_SP(1);
4347
4348 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4349}
4350
4351static VALUE
4352vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4353{
4354 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4355 const struct rb_callinfo *ci = calling->cd->ci;
4356 int flags = VM_CALL_FCALL;
4357 VALUE sym;
4358
4359 VALUE argv_ary;
4360 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4361 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4362 sym = rb_ary_shift(argv_ary);
4363 flags |= VM_CALL_ARGS_SPLAT;
4364 if (calling->kw_splat) {
4365 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4366 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4367 calling->kw_splat = 0;
4368 }
4369 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4370 }
4371
4372 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4373 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4374}
4375
4376static VALUE
4377vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4378{
4379 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4380 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4381}
4382
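/*
 * send/__send__: the method name is the first argument on the stack. The
 * complex path handles splats, kwargs and forwarding (possibly via a heap
 * argv array); the simple path just shifts the name off and re-dispatches
 * with VM_CALL_FCALL set, since send may call private methods.
 */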
4383static VALUE
4384vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4385{
4386 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4387
4388 const struct rb_callinfo *ci = calling->cd->ci;
4389 int flags = vm_ci_flag(ci);
4390
4391 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4392 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4393 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4394 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4395 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4396 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4397 }
4398
4399 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4400 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4401}
4402
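/*
 * Rewrites the current call into recv.method_missing(mid, ...): shifts the
 * arguments up one slot, stores the original method name at argv[0], and
 * re-dispatches idMethodMissing with a fresh on-stack callinfo/callcache.
 */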
4403static VALUE
4404vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4405 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4406{
4407 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4408
4409 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4410 unsigned int argc, flag;
4411
4412 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4413 argc = ++calling->argc;
4414
4415 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4416 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4417 vm_check_canary(ec, reg_cfp->sp);
4418 if (argc > 1) {
4419 MEMMOVE(argv+1, argv, VALUE, argc-1);
4420 }
4421 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4422 INC_SP(1);
4423
4424 ec->method_missing_reason = reason;
4425
4426 struct rb_forwarding_call_data new_fcd = {
4427 .cd = {
4428 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4429 .cc = NULL,
4430 },
4431 .caller_ci = NULL,
4432 };
4433
4434 if (!(flag & VM_CALL_FORWARDING)) {
4435 calling->cd = &new_fcd.cd;
4436 }
4437 else {
4438 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4439 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4440 new_fcd.caller_ci = caller_ci;
4441 calling->cd = (struct rb_call_data *)&new_fcd;
4442 }
4443
4444 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4445 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4446 return vm_call_method(ec, reg_cfp, calling);
4447}
4448
4449static VALUE
4450vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4451{
4452 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4453}
4454
4455static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4456static VALUE
4457vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4458{
4459 klass = RCLASS_SUPER(klass);
4460
4461 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4462 if (cme == NULL) {
4463 return vm_call_method_nome(ec, cfp, calling);
4464 }
4465 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4466 cme->def->body.refined.orig_me) {
4467 cme = refined_method_callable_without_refinement(cme);
4468 }
4469
4470 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4471
4472 return vm_call_method_each_type(ec, cfp, calling);
4473}
4474
4475static inline VALUE
4476find_refinement(VALUE refinements, VALUE klass)
4477{
4478 if (NIL_P(refinements)) {
4479 return Qnil;
4480 }
4481 return rb_hash_lookup(refinements, klass);
4482}
4483
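/* For a block frame, returns the control frame of the method the block
 * lexically belongs to (the frame running its local iseq); falls back to
 * the block's own frame for orphan blocks. */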
4484PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4485static rb_control_frame_t *
4486current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4487{
4488 rb_control_frame_t *top_cfp = cfp;
4489
4490 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4491 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4492
4493 do {
4494 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4495 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4496 /* TODO: orphan block */
4497 return top_cfp;
4498 }
4499 } while (cfp->iseq != local_iseq);
4500 }
4501 return cfp;
4502}
4503
4504static const rb_callable_method_entry_t *
4505refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4506{
4507 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4508 const rb_callable_method_entry_t *cme;
4509
4510 if (orig_me->defined_class == 0) {
4511 cme = NULL;
4512 rb_notimplement();
4513 }
4514 else {
4515 cme = (const rb_callable_method_entry_t *)orig_me;
4516 }
4517
4518 VM_ASSERT(callable_method_entry_p(cme));
4519
4520 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4521 cme = NULL;
4522 }
4523
4524 return cme;
4525}
4526
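/*
 * Looks for an activated refinement of the method's owner by walking the
 * lexical cref chain. When invoked via super, the refinement currently
 * being executed is skipped so that super inside a refined method does not
 * call itself.
 */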
4527static const rb_callable_method_entry_t *
4528search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4529{
4530 ID mid = vm_ci_mid(calling->cd->ci);
4531 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4532 const struct rb_callcache * const cc = calling->cc;
4533 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4534
4535 for (; cref; cref = CREF_NEXT(cref)) {
4536 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4537 if (NIL_P(refinement)) continue;
4538
4539 const rb_callable_method_entry_t *const ref_me =
4540 rb_callable_method_entry(refinement, mid);
4541
4542 if (ref_me) {
4543 if (vm_cc_call(cc) == vm_call_super_method) {
4544 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4545 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4546 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4547 continue;
4548 }
4549 }
4550
4551 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4552 cme->def != ref_me->def) {
4553 cme = ref_me;
4554 }
4555 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4556 return cme;
4557 }
4558 }
4559 else {
4560 return NULL;
4561 }
4562 }
4563
4564 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4565 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4566 }
4567 else {
4568 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4569 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4570 return cme;
4571 }
4572}
4573
4574static VALUE
4575vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4576{
4577 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4578
4579 if (ref_cme) {
4580 if (calling->cd->cc) {
4581 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4582 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4583 return vm_call_method(ec, cfp, calling);
4584 }
4585 else {
4586 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4587 calling->cc = ref_cc;
4588 return vm_call_method(ec, cfp, calling);
4589 }
4590 }
4591 else {
4592 return vm_call_method_nome(ec, cfp, calling);
4593 }
4594}
4595
4596static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4597
4598NOINLINE(static VALUE
4599 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4600 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4601
4602static VALUE
4603vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4604 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4605{
4606 int argc = calling->argc;
4607
4608 /* remove self */
4609 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4610 DEC_SP(1);
4611
4612 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4613}
4614
4615static VALUE
4616vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4617{
4618 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4619
4620 const struct rb_callinfo *ci = calling->cd->ci;
4621 VALUE procval = calling->recv;
4622 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4623}
4624
4625static VALUE
4626vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4627{
4628 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4629
4630 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4631 const struct rb_callinfo *ci = calling->cd->ci;
4632
4633 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4634 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4635 }
4636 else {
4637 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4638 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4639 return vm_call_general(ec, reg_cfp, calling);
4640 }
4641}
4642
4643static VALUE
4644vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4645{
4646 VALUE recv = calling->recv;
4647
4648 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4649 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4650 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4651
4652 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4653 return internal_RSTRUCT_GET(recv, off);
4654}
4655
4656static VALUE
4657vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4658{
4659 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4660
4661 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4662 reg_cfp->sp -= 1;
4663 return ret;
4664}
4665
4666static VALUE
4667vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4668{
4669 VALUE recv = calling->recv;
4670
4671 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4672 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4673 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4674
4675 rb_check_frozen(recv);
4676
4677 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4678 internal_RSTRUCT_SET(recv, off, val);
4679
4680 return val;
4681}
4682
4683static VALUE
4684vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4685{
4686 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4687
4688 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4689 reg_cfp->sp -= 2;
4690 return ret;
4691}
4692
4693NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4694 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4695
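/*
 * Runs an attr access either wrapped in C_CALL/C_RETURN event hooks (when
 * tracing of C calls is active) or, in the common case, after the `nohook`
 * statement, which typically installs the fastpath for the next call.
 */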
4696#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4697 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4698 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4699 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4700 var = func; \
4701 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4702 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4703 } \
4704 else { \
4705 nohook; \
4706 var = func; \
4707 }
4708
4709static VALUE
4710vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4711 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4712{
4713 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4714 case OPTIMIZED_METHOD_TYPE_SEND:
4715 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4716 return vm_call_opt_send(ec, cfp, calling);
4717 case OPTIMIZED_METHOD_TYPE_CALL:
4718 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4719 return vm_call_opt_call(ec, cfp, calling);
4720 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4721 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4722 return vm_call_opt_block_call(ec, cfp, calling);
4723 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4724 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4725 rb_check_arity(calling->argc, 0, 0);
4726
4727 VALUE v;
4728 VM_CALL_METHOD_ATTR(v,
4729 vm_call_opt_struct_aref(ec, cfp, calling),
4730 set_vm_cc_ivar(cc); \
4731 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4732 return v;
4733 }
4734 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4735 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4736 rb_check_arity(calling->argc, 1, 1);
4737
4738 VALUE v;
4739 VM_CALL_METHOD_ATTR(v,
4740 vm_call_opt_struct_aset(ec, cfp, calling),
4741 set_vm_cc_ivar(cc); \
4742 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4743 return v;
4744 }
4745 default:
4746 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4747 }
4748}
4749
4750static VALUE
4751vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4752{
4753 const struct rb_callinfo *ci = calling->cd->ci;
4754 const struct rb_callcache *cc = calling->cc;
4755 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4756 VALUE v;
4757
4758 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4759
4760 switch (cme->def->type) {
4761 case VM_METHOD_TYPE_ISEQ:
4762 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4763 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4764 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4765 }
4766 else {
4767 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4768 return vm_call_iseq_setup(ec, cfp, calling);
4769 }
4770
4771 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4772 case VM_METHOD_TYPE_CFUNC:
4773 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4774 return vm_call_cfunc(ec, cfp, calling);
4775
4776 case VM_METHOD_TYPE_ATTRSET:
4777 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4778
4779 rb_check_arity(calling->argc, 1, 1);
4780
4781 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4782
4783 if (vm_cc_markable(cc)) {
4784 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4785 VM_CALL_METHOD_ATTR(v,
4786 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4787 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4788 }
4789 else {
4790 cc = &((struct rb_callcache) {
4791 .flags = T_IMEMO |
4792 (imemo_callcache << FL_USHIFT) |
4793 VM_CALLCACHE_UNMARKABLE |
4794 VM_CALLCACHE_ON_STACK,
4795 .klass = cc->klass,
4796 .cme_ = cc->cme_,
4797 .call_ = cc->call_,
4798 .aux_ = {
4799 .attr = {
4800 .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4801 }
4802 },
4803 });
4804
4805 VM_CALL_METHOD_ATTR(v,
4806 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4807 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4808 }
4809 return v;
4810
4811 case VM_METHOD_TYPE_IVAR:
4812 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4813 rb_check_arity(calling->argc, 0, 0);
4814 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4815 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4816 VM_CALL_METHOD_ATTR(v,
4817 vm_call_ivar(ec, cfp, calling),
4818 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4819 return v;
4820
4821 case VM_METHOD_TYPE_MISSING:
4822 vm_cc_method_missing_reason_set(cc, 0);
4823 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4824 return vm_call_method_missing(ec, cfp, calling);
4825
4826 case VM_METHOD_TYPE_BMETHOD:
4827 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4828 return vm_call_bmethod(ec, cfp, calling);
4829
4830 case VM_METHOD_TYPE_ALIAS:
4831 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4832 return vm_call_alias(ec, cfp, calling);
4833
4834 case VM_METHOD_TYPE_OPTIMIZED:
4835 return vm_call_optimized(ec, cfp, calling, ci, cc);
4836
4837 case VM_METHOD_TYPE_UNDEF:
4838 break;
4839
4840 case VM_METHOD_TYPE_ZSUPER:
4841 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4842
4843 case VM_METHOD_TYPE_REFINED:
4844 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4845 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4846 return vm_call_refined(ec, cfp, calling);
4847 }
4848
4849 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4850}
4851
4852NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4853
4854static VALUE
4855vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4856{
4857 /* method missing */
4858 const struct rb_callinfo *ci = calling->cd->ci;
4859 const int stat = ci_missing_reason(ci);
4860
4861 if (vm_ci_mid(ci) == idMethodMissing) {
4862 if (UNLIKELY(calling->heap_argv)) {
4863 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4864 }
4865 else {
4866 rb_control_frame_t *reg_cfp = cfp;
4867 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4868 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4869 }
4870 }
4871 else {
4872 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4873 }
4874}
4875
4876/* Protected method calls and super invocations need to check that the receiver
4877 * (self for super) inherits the module on which the method is defined.
4878 * In the case of refinements, it should consider the original class, not the
4879 * refinement.
4880 */
4881static VALUE
4882vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4883{
4884 VALUE defined_class = me->defined_class;
4885 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4886 return NIL_P(refined_class) ? defined_class : refined_class;
4887}
4888
4889static inline VALUE
4890vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4891{
4892 const struct rb_callinfo *ci = calling->cd->ci;
4893 const struct rb_callcache *cc = calling->cc;
4894
4895 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4896
4897 if (vm_cc_cme(cc) != NULL) {
4898 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4899 case METHOD_VISI_PUBLIC: /* likely */
4900 return vm_call_method_each_type(ec, cfp, calling);
4901
4902 case METHOD_VISI_PRIVATE:
4903 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4904 enum method_missing_reason stat = MISSING_PRIVATE;
4905 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4906
4907 vm_cc_method_missing_reason_set(cc, stat);
4908 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4909 return vm_call_method_missing(ec, cfp, calling);
4910 }
4911 return vm_call_method_each_type(ec, cfp, calling);
4912
4913 case METHOD_VISI_PROTECTED:
4914 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4915 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4916 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4917 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4918 return vm_call_method_missing(ec, cfp, calling);
4919 }
4920 else {
4921 /* caching method info to dummy cc */
4922 VM_ASSERT(vm_cc_cme(cc) != NULL);
4923 struct rb_callcache cc_on_stack = *cc;
4924 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4925 calling->cc = &cc_on_stack;
4926 return vm_call_method_each_type(ec, cfp, calling);
4927 }
4928 }
4929 return vm_call_method_each_type(ec, cfp, calling);
4930
4931 default:
4932 rb_bug("unreachable");
4933 }
4934 }
4935 else {
4936 return vm_call_method_nome(ec, cfp, calling);
4937 }
4938}
4939
4940static VALUE
4941vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4942{
4943 RB_DEBUG_COUNTER_INC(ccf_general);
4944 return vm_call_method(ec, reg_cfp, calling);
4945}
4946
4947void
4948rb_vm_cc_general(const struct rb_callcache *cc)
4949{
4950 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4951 VM_ASSERT(cc != vm_cc_empty());
4952
4953 *(vm_call_handler *)&cc->call_ = vm_call_general;
4954}
4955
4956static VALUE
4957vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4958{
4959 RB_DEBUG_COUNTER_INC(ccf_super_method);
4960
4961 // This line makes this function distinct from `vm_call_general`: some compilers
4962 // (MSVC, as we found) can merge identical functions, and then their addresses become the same.
4963 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
4964 if (ec == NULL) rb_bug("unreachable");
4965
4966 /* this check is also required to distinguish this handler from other functions. */
4967 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4968 return vm_call_method(ec, reg_cfp, calling);
4969}
4970
4971/* super */
4972
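/*
 * Determines where super continues the lookup: a refinement's iclass is
 * unwrapped to the refinement module itself, and RCLASS_ORIGIN is taken so
 * the search resumes after the slot where the class's own methods live,
 * even when modules are prepended.
 */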
4973static inline VALUE
4974vm_search_normal_superclass(VALUE klass)
4975{
4976 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4977 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4978 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4979 klass = RBASIC(klass)->klass;
4980 }
4981 klass = RCLASS_ORIGIN(klass);
4982 return RCLASS_SUPER(klass);
4983}
4984
4985NORETURN(static void vm_super_outside(void));
4986
4987static void
4988vm_super_outside(void)
4989{
4990 rb_raise(rb_eNoMethodError, "super called outside of method");
4991}
4992
4993static const struct rb_callcache *
4994empty_cc_for_super(void)
4995{
4996 return &vm_empty_cc_for_super;
4997}
4998
4999static const struct rb_callcache *
5000vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5001{
5002 VALUE current_defined_class;
5003 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5004
5005 if (!me) {
5006 vm_super_outside();
5007 }
5008
5009 current_defined_class = vm_defined_class_for_protected_call(me);
5010
5011 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5012 reg_cfp->iseq != method_entry_iseqptr(me) &&
5013 !rb_obj_is_kind_of(recv, current_defined_class)) {
5014 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5015 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5016
5017 if (m) { /* not bound UnboundMethod */
5018 rb_raise(rb_eTypeError,
5019 "self has wrong type to call super in this context: "
5020 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5021 rb_obj_class(recv), m);
5022 }
5023 }
5024
5025 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5026 rb_raise(rb_eRuntimeError,
5027 "implicit argument passing of super from method defined"
5028 " by define_method() is not supported."
5029 " Specify all arguments explicitly.");
5030 }
5031
5032 ID mid = me->def->original_id;
5033
5034 if (!vm_ci_markable(cd->ci)) {
5035 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5036 }
5037 else {
5038 // update iseq. really? (TODO)
5039 cd->ci = vm_ci_new_runtime(mid,
5040 vm_ci_flag(cd->ci),
5041 vm_ci_argc(cd->ci),
5042 vm_ci_kwarg(cd->ci));
5043
5044 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5045 }
5046
5047 const struct rb_callcache *cc;
5048
5049 VALUE klass = vm_search_normal_superclass(me->defined_class);
5050
5051 if (!klass) {
5052 /* bound instance method of module */
5053 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5054 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5055 }
5056 else {
5057 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5058 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5059
5060 // define_method can leave a cme cached under a different method id
5061 if (cached_cme == NULL) {
5062 // empty_cc_for_super is not a markable object
5063 cd->cc = empty_cc_for_super();
5064 }
5065 else if (cached_cme->called_id != mid) {
5066 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5067 if (cme) {
5068 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5069 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5070 }
5071 else {
5072 cd->cc = cc = empty_cc_for_super();
5073 }
5074 }
5075 else {
5076 switch (cached_cme->def->type) {
5077 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5078 case VM_METHOD_TYPE_REFINED:
5079 // cc->klass is the superclass of the receiver's class. Checking cc->klass alone is not enough to invalidate the IVC for the receiver's class.
5080 case VM_METHOD_TYPE_ATTRSET:
5081 case VM_METHOD_TYPE_IVAR:
5082 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5083 break;
5084 default:
5085 break; // use fastpath
5086 }
5087 }
5088 }
5089
5090 VM_ASSERT((vm_cc_cme(cc), true));
5091
5092 return cc;
5093}
5094
5095/* yield */
5096
5097static inline int
5098block_proc_is_lambda(const VALUE procval)
5099{
5100 rb_proc_t *proc;
5101
5102 if (procval) {
5103 GetProcPtr(procval, proc);
5104 return proc->is_lambda;
5105 }
5106 else {
5107 return 0;
5108 }
5109}
5110
5111static VALUE
5112vm_yield_with_cfunc(rb_execution_context_t *ec,
5113 const struct rb_captured_block *captured,
5114 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5115 const rb_callable_method_entry_t *me)
5116{
5117 int is_lambda = FALSE; /* TODO */
5118 VALUE val, arg, blockarg;
5119 int frame_flag;
5120 const struct vm_ifunc *ifunc = captured->code.ifunc;
5121
5122 if (is_lambda) {
5123 arg = rb_ary_new4(argc, argv);
5124 }
5125 else if (argc == 0) {
5126 arg = Qnil;
5127 }
5128 else {
5129 arg = argv[0];
5130 }
5131
5132 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5133
5134 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5135 if (kw_splat) {
5136 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5137 }
5138
5139 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5140 frame_flag,
5141 self,
5142 VM_GUARDED_PREV_EP(captured->ep),
5143 (VALUE)me,
5144 0, ec->cfp->sp, 0, 0);
5145 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5146 rb_vm_pop_frame(ec);
5147
5148 return val;
5149}
5150
5151VALUE
5152rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5153{
5154 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5155}
5156
5157static VALUE
5158vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5159{
5160 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5161}
5162
5163static inline int
5164vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5165{
5166 int i;
5167 long len = RARRAY_LEN(ary);
5168
5169 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5170
5171 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5172 argv[i] = RARRAY_AREF(ary, i);
5173 }
5174
5175 return i;
5176}
5177
5178static inline VALUE
5179vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5180{
5181 VALUE ary, arg0 = argv[0];
5182 ary = rb_check_array_type(arg0);
5183#if 0
5184 argv[0] = arg0;
5185#else
5186 VM_ASSERT(argv[0] == arg0);
5187#endif
5188 return ary;
5189}
5190
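/*
 * Block parameter binding for simple iseqs: for a non-lambda block taking
 * multiple lead parameters, a single Array argument is auto-splatted
 * (e.g. [[1, 2]].each { |a, b| } binds a=1, b=2), missing arguments are
 * padded with nil and extras are truncated. Lambdas keep strict arity and
 * raise through argument_arity_error instead.
 */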
5191static int
5192vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5193{
5194 if (rb_simple_iseq_p(iseq)) {
5195 rb_control_frame_t *cfp = ec->cfp;
5196 VALUE arg0;
5197
5198 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5199
5200 if (arg_setup_type == arg_setup_block &&
5201 calling->argc == 1 &&
5202 ISEQ_BODY(iseq)->param.flags.has_lead &&
5203 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5204 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5205 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5206 }
5207
5208 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5209 if (arg_setup_type == arg_setup_block) {
5210 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5211 int i;
5212 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5213 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5214 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5215 }
5216 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5217 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5218 }
5219 }
5220 else {
5221 argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5222 }
5223 }
5224
5225 return 0;
5226 }
5227 else {
5228 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5229 }
5230}
5231
5232static int
5233vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5234{
5235 struct rb_calling_info calling_entry, *calling;
5236
5237 calling = &calling_entry;
5238 calling->argc = argc;
5239 calling->block_handler = block_handler;
5240 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5241 calling->recv = Qundef;
5242 calling->heap_argv = 0;
5243 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5244
5245 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5246}
5247
5248/* ruby iseq -> ruby block */
5249
5250static VALUE
5251vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5252 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5253 bool is_lambda, VALUE block_handler)
5254{
5255 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5256 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5257 const int arg_size = ISEQ_BODY(iseq)->param.size;
5258 VALUE * const rsp = GET_SP() - calling->argc;
5259 VALUE * const argv = rsp;
5260 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5261
5262 SET_SP(rsp);
5263
5264 vm_push_frame(ec, iseq,
5265 VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
5266 captured->self,
5267 VM_GUARDED_PREV_EP(captured->ep), 0,
5268 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5269 rsp + arg_size,
5270 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5271
5272 return Qundef;
5273}
5274
5275static VALUE
5276vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5277 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5278 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5279{
5280 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5281 int flags = vm_ci_flag(ci);
5282
5283 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5284 ((calling->argc == 0) ||
5285 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5286 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5287 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5288 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5289 flags = 0;
5290 if (UNLIKELY(calling->heap_argv)) {
5291#if VM_ARGC_STACK_MAX < 0
5292 if (RARRAY_LEN(calling->heap_argv) < 1) {
5293 rb_raise(rb_eArgError, "no receiver given");
5294 }
5295#endif
5296 calling->recv = rb_ary_shift(calling->heap_argv);
5297 // Modify stack to avoid cfp consistency error
5298 reg_cfp->sp++;
5299 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5300 reg_cfp->sp[-2] = calling->recv;
5301 flags |= VM_CALL_ARGS_SPLAT;
5302 }
5303 else {
5304 if (calling->argc < 1) {
5305 rb_raise(rb_eArgError, "no receiver given");
5306 }
5307 calling->recv = TOPN(--calling->argc);
5308 }
5309 if (calling->kw_splat) {
5310 flags |= VM_CALL_KW_SPLAT;
5311 }
5312 }
5313 else {
5314 if (calling->argc < 1) {
5315 rb_raise(rb_eArgError, "no receiver given");
5316 }
5317 calling->recv = TOPN(--calling->argc);
5318 }
5319
5320 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5321}
5322
5323static VALUE
5324vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5325 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5326 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5327{
5328 VALUE val;
5329 int argc;
5330 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5331 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5332 argc = calling->argc;
5333 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5334 POPN(argc); /* TODO: should put before C/yield? */
5335 return val;
5336}
5337
5338static VALUE
5339vm_proc_to_block_handler(VALUE procval)
5340{
5341 const struct rb_block *block = vm_proc_block(procval);
5342
5343 switch (vm_block_type(block)) {
5344 case block_type_iseq:
5345 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5346 case block_type_ifunc:
5347 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5348 case block_type_symbol:
5349 return VM_BH_FROM_SYMBOL(block->as.symbol);
5350 case block_type_proc:
5351 return VM_BH_FROM_PROC(block->as.proc);
5352 }
5353 VM_UNREACHABLE(vm_yield_with_proc);
5354 return Qundef;
5355}
5356
5357static VALUE
5358vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5359 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5360 bool is_lambda, VALUE block_handler)
5361{
5362 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5363 VALUE proc = VM_BH_TO_PROC(block_handler);
5364 is_lambda = block_proc_is_lambda(proc);
5365 block_handler = vm_proc_to_block_handler(proc);
5366 }
5367
5368 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5369}
5370
5371static inline VALUE
5372vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5373 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5374 bool is_lambda, VALUE block_handler)
5375{
5376 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5377 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5378 bool is_lambda, VALUE block_handler);
5379
5380 switch (vm_block_handler_type(block_handler)) {
5381 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5382 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5383 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5384 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5385 default: rb_bug("vm_invoke_block: unreachable");
5386 }
5387
5388 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5389}
5390
5391static VALUE
5392vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5393{
5394 const rb_execution_context_t *ec = GET_EC();
5395 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5396 struct rb_captured_block *captured;
5397
5398 if (cfp == 0) {
5399 rb_bug("vm_make_proc_with_iseq: unreachable");
5400 }
5401
5402 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5403 captured->code.iseq = blockiseq;
5404
5405 return rb_vm_make_proc(ec, captured, rb_cProc);
5406}
5407
5408static VALUE
5409vm_once_exec(VALUE iseq)
5410{
5411 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5412 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5413}
5414
5415static VALUE
5416vm_once_clear(VALUE data)
5417{
5418 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5419 is->once.running_thread = NULL;
5420 return Qnil;
5421}
5422
5423/* defined insn */
5424
5425static bool
5426check_respond_to_missing(VALUE obj, VALUE v)
5427{
5428 VALUE args[2];
5429 VALUE r;
5430
5431 args[0] = obj; args[1] = Qfalse;
5432 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5433 if (!UNDEF_P(r) && RTEST(r)) {
5434 return true;
5435 }
5436 else {
5437 return false;
5438 }
5439}
5440
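/*
 * Backend of the defined? expression: op_type selects which kind of
 * existence check to run (ivar, gvar, cvar, constant, method, yield, super,
 * or backref). A protected method only counts as defined when self is a
 * kind of the defining class, mirroring call-site visibility.
 */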
5441static bool
5442vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5443{
5444 VALUE klass;
5445 enum defined_type type = (enum defined_type)op_type;
5446
5447 switch (type) {
5448 case DEFINED_IVAR:
5449 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5450 break;
5451 case DEFINED_GVAR:
5452 return rb_gvar_defined(SYM2ID(obj));
5453 break;
5454 case DEFINED_CVAR: {
5455 const rb_cref_t *cref = vm_get_cref(GET_EP());
5456 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5457 return rb_cvar_defined(klass, SYM2ID(obj));
5458 break;
5459 }
5460 case DEFINED_CONST:
5461 case DEFINED_CONST_FROM: {
5462 bool allow_nil = type == DEFINED_CONST;
5463 klass = v;
5464 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5465 break;
5466 }
5467 case DEFINED_FUNC:
5468 klass = CLASS_OF(v);
5469 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5470 break;
5471 case DEFINED_METHOD:{
5472 VALUE klass = CLASS_OF(v);
5473 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5474
5475 if (me) {
5476 switch (METHOD_ENTRY_VISI(me)) {
5477 case METHOD_VISI_PRIVATE:
5478 break;
5479 case METHOD_VISI_PROTECTED:
5480 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5481 break;
5482 }
5483 case METHOD_VISI_PUBLIC:
5484 return true;
5485 break;
5486 default:
5487 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5488 }
5489 }
5490 else {
5491 return check_respond_to_missing(obj, v);
5492 }
5493 break;
5494 }
5495 case DEFINED_YIELD:
5496 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5497 return true;
5498 }
5499 break;
5500 case DEFINED_ZSUPER:
5501 {
5502 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5503
5504 if (me) {
5505 VALUE klass = vm_search_normal_superclass(me->defined_class);
5506 if (!klass) return false;
5507
5508 ID id = me->def->original_id;
5509
5510 return rb_method_boundp(klass, id, 0);
5511 }
5512 }
5513 break;
5514 case DEFINED_REF:
5515 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5516 default:
5517 rb_bug("unimplemented defined? type (VM)");
5518 break;
5519 }
5520
5521 return false;
5522}
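/* Illustration (editorial note, not in the original source): vm_defined()
 * backs Ruby's `defined?` keyword; the defined insn maps its boolean
 * result to the familiar strings, roughly:
 *
 *   defined?(@ivar)    # DEFINED_IVAR       -> "instance-variable"
 *   defined?($gvar)    # DEFINED_GVAR       -> "global-variable"
 *   defined?(Foo::Bar) # DEFINED_CONST_FROM -> "constant"
 *   defined?(yield)    # DEFINED_YIELD      -> "yield" (only with a block)
 *   defined?(super)    # DEFINED_ZSUPER     -> "super" (only when bound)
 */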
5523
5524bool
5525rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5526{
5527 return vm_defined(ec, reg_cfp, op_type, obj, v);
5528}
5529
5530static const VALUE *
5531vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5532{
5533 rb_num_t i;
5534 const VALUE *ep = reg_ep;
5535 for (i = 0; i < lv; i++) {
5536 ep = GET_PREV_EP(ep);
5537 }
5538 return ep;
5539}
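/* Illustration (editorial): vm_get_ep() hops `lv` levels up the chain of
 * environment pointers, one GET_PREV_EP() per enclosing scope. Roughly:
 *
 *   x = 1
 *   p = proc { proc { x } }   # the innermost read of `x` is a
 *                             # level-2 local access: two EP hops
 */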
5540
5541static VALUE
5542vm_get_special_object(const VALUE *const reg_ep,
5543 enum vm_special_object_type type)
5544{
5545 switch (type) {
5546 case VM_SPECIAL_OBJECT_VMCORE:
5547 return rb_mRubyVMFrozenCore;
5548 case VM_SPECIAL_OBJECT_CBASE:
5549 return vm_get_cbase(reg_ep);
5550 case VM_SPECIAL_OBJECT_CONST_BASE:
5551 return vm_get_const_base(reg_ep);
5552 default:
5553 rb_bug("putspecialobject insn: unknown value_type %d", type);
5554 }
5555}
5556
5557static VALUE
5558vm_concat_array(VALUE ary1, VALUE ary2st)
5559{
5560 const VALUE ary2 = ary2st;
5561 VALUE tmp1 = rb_check_to_array(ary1);
5562 VALUE tmp2 = rb_check_to_array(ary2);
5563
5564 if (NIL_P(tmp1)) {
5565 tmp1 = rb_ary_new3(1, ary1);
5566 }
5567 if (tmp1 == ary1) {
5568 tmp1 = rb_ary_dup(ary1);
5569 }
5570
5571 if (NIL_P(tmp2)) {
5572 return rb_ary_push(tmp1, ary2);
5573 }
5574 else {
5575 return rb_ary_concat(tmp1, tmp2);
5576 }
5577}
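/* Illustration (editorial): vm_concat_array() backs multiple-splat
 * concatenation. rb_check_to_array() returning nil means the operand is
 * not array-like, so it is appended as a single element:
 *
 *   a = [1, 2]
 *   [*a, *3]      # => [1, 2, 3]    (3 has no usable #to_a)
 *   [*a, *[3, 4]] # => [1, 2, 3, 4]
 *
 * The tmp1 == ary1 dup above guards against mutating a caller-visible
 * array.
 */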
5578
5579static VALUE
5580vm_concat_to_array(VALUE ary1, VALUE ary2st)
5581{
5582 /* ary1 must be a newly created array */
5583 const VALUE ary2 = ary2st;
5584
5585 if (NIL_P(ary2)) return ary1;
5586
5587 VALUE tmp2 = rb_check_to_array(ary2);
5588
5589 if (NIL_P(tmp2)) {
5590 return rb_ary_push(ary1, ary2);
5591 }
5592 else {
5593 return rb_ary_concat(ary1, tmp2);
5594 }
5595}
5596
 5597 // The YJIT implementation uses this C function
 5598 // and needs to call a non-static function
5599VALUE
5600rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5601{
5602 return vm_concat_array(ary1, ary2st);
5603}
5604
5605VALUE
5606rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5607{
5608 return vm_concat_to_array(ary1, ary2st);
5609}
5610
5611static VALUE
5612vm_splat_array(VALUE flag, VALUE ary)
5613{
5614 if (NIL_P(ary)) {
5615 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5616 }
5617 VALUE tmp = rb_check_to_array(ary);
5618 if (NIL_P(tmp)) {
5619 return rb_ary_new3(1, ary);
5620 }
5621 else if (RTEST(flag)) {
5622 return rb_ary_dup(tmp);
5623 }
5624 else {
5625 return tmp;
5626 }
5627}
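/* Illustration (editorial): vm_splat_array() backs the splatarray insn;
 * `flag` asks for a fresh mutable array, otherwise a shared (possibly
 * frozen) result is acceptable:
 *
 *   a = *nil     # => []      (nil splats to an empty array)
 *   a = *1       # => [1]     (a non array-like value is wrapped)
 *   a = *[1, 2]  # => [1, 2]
 */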
5628
 5629 // The YJIT implementation uses this C function
 5630 // and needs to call a non-static function
5631VALUE
5632rb_vm_splat_array(VALUE flag, VALUE ary)
5633{
5634 return vm_splat_array(flag, ary);
5635}
5636
5637static VALUE
5638vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5639{
5640 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5641
5642 if (flag & VM_CHECKMATCH_ARRAY) {
5643 long i;
5644 const long n = RARRAY_LEN(pattern);
5645
5646 for (i = 0; i < n; i++) {
5647 VALUE v = RARRAY_AREF(pattern, i);
5648 VALUE c = check_match(ec, v, target, type);
5649
5650 if (RTEST(c)) {
5651 return c;
5652 }
5653 }
5654 return Qfalse;
5655 }
5656 else {
5657 return check_match(ec, pattern, target, type);
5658 }
5659}
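/* Illustration (editorial): with VM_CHECKMATCH_ARRAY set, `pattern` is an
 * array of patterns and each element is tried with #=== until one
 * matches, which is how a splatted `when` list behaves:
 *
 *   pats = [Integer, String]
 *   case 1
 *   when *pats then :hit   # Integer === 1 matches first
 *   end
 */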
5660
5661VALUE
5662rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5663{
5664 return vm_check_match(ec, target, pattern, flag);
5665}
5666
5667static VALUE
5668vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5669{
5670 const VALUE kw_bits = *(ep - bits);
5671
5672 if (FIXNUM_P(kw_bits)) {
5673 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5674 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5675 return Qfalse;
5676 }
5677 else {
5678 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5679 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5680 }
5681 return Qtrue;
5682}
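/* Illustration (editorial): `kw_bits` records which optional keywords the
 * caller actually passed, as a fixnum bitmask (or a hash once the index
 * exceeds KW_SPECIFIED_BITS_MAX). checkkeyword answers Qtrue when the
 * keyword was NOT supplied, i.e. when the default expression must run:
 *
 *   def m(a: expensive)   # `expensive` is evaluated only for m(),
 *   end                   # never for m(a: 1)
 */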
5683
5684static void
5685vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5686{
5687 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5688 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5689 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5690 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5691
5692 switch (flag) {
5693 case RUBY_EVENT_CALL:
5694 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5695 return;
5696 case RUBY_EVENT_C_CALL:
5697 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5698 return;
5699 case RUBY_EVENT_RETURN:
5700 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5701 return;
 5702 case RUBY_EVENT_C_RETURN:
 5703 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5704 return;
5705 }
5706 }
5707}
5708
5709static VALUE
5710vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5711{
5712 if (!rb_const_defined_at(cbase, id)) {
5713 return 0;
5714 }
5715 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5716 return rb_public_const_get_at(cbase, id);
5717 }
5718 else {
5719 return rb_const_get_at(cbase, id);
5720 }
5721}
5722
5723static VALUE
5724vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5725{
5726 if (!RB_TYPE_P(klass, T_CLASS)) {
5727 return 0;
5728 }
5729 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5730 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5731
5732 if (tmp != super) {
5733 rb_raise(rb_eTypeError,
5734 "superclass mismatch for class %"PRIsVALUE"",
5735 rb_id2str(id));
5736 }
5737 else {
5738 return klass;
5739 }
5740 }
5741 else {
5742 return klass;
5743 }
5744}
5745
5746static VALUE
5747vm_check_if_module(ID id, VALUE mod)
5748{
5749 if (!RB_TYPE_P(mod, T_MODULE)) {
5750 return 0;
5751 }
5752 else {
5753 return mod;
5754 }
5755}
5756
5757static VALUE
5758declare_under(ID id, VALUE cbase, VALUE c)
5759{
5760 rb_set_class_path_string(c, cbase, rb_id2str(id));
5761 rb_const_set(cbase, id, c);
5762 return c;
5763}
5764
5765static VALUE
5766vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5767{
5768 /* new class declaration */
5769 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5770 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
 5771 rb_define_alloc_func(c, rb_get_alloc_func(c));
 5772 rb_class_inherited(s, c);
5773 return c;
5774}
5775
5776static VALUE
5777vm_declare_module(ID id, VALUE cbase)
5778{
5779 /* new module declaration */
5780 return declare_under(id, cbase, rb_module_new());
5781}
5782
5783NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5784static void
5785unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5786{
5787 VALUE name = rb_id2str(id);
5788 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5789 name, type);
5790 VALUE location = rb_const_source_location_at(cbase, id);
5791 if (!NIL_P(location)) {
5792 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5793 " previous definition of %"PRIsVALUE" was here",
5794 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5795 }
 5796 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
 5797}
5798
5799static VALUE
5800vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5801{
5802 VALUE klass;
5803
5804 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5805 rb_raise(rb_eTypeError,
5806 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5807 rb_obj_class(super));
5808 }
5809
5810 vm_check_if_namespace(cbase);
5811
5812 /* find klass */
5813 rb_autoload_load(cbase, id);
5814 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5815 if (!vm_check_if_class(id, flags, super, klass))
5816 unmatched_redefinition("class", cbase, id, klass);
5817 return klass;
5818 }
5819 else {
5820 return vm_declare_class(id, flags, cbase, super);
5821 }
5822}
5823
5824static VALUE
5825vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5826{
5827 VALUE mod;
5828
5829 vm_check_if_namespace(cbase);
5830 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5831 if (!vm_check_if_module(id, mod))
5832 unmatched_redefinition("module", cbase, id, mod);
5833 return mod;
5834 }
5835 else {
5836 return vm_declare_module(id, cbase);
5837 }
5838}
5839
5840static VALUE
5841vm_find_or_create_class_by_id(ID id,
5842 rb_num_t flags,
5843 VALUE cbase,
5844 VALUE super)
5845{
5846 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5847
5848 switch (type) {
5849 case VM_DEFINECLASS_TYPE_CLASS:
5850 /* classdef returns class scope value */
5851 return vm_define_class(id, flags, cbase, super);
5852
5853 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5854 /* classdef returns class scope value */
5855 return rb_singleton_class(cbase);
5856
5857 case VM_DEFINECLASS_TYPE_MODULE:
5858 /* classdef returns class scope value */
5859 return vm_define_module(id, flags, cbase);
5860
5861 default:
5862 rb_bug("unknown defineclass type: %d", (int)type);
5863 }
5864}
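/* Illustration (editorial): the defineclass insn reaches this dispatcher
 * for each flavor of definition, roughly:
 *
 *   class C < S; end   -> VM_DEFINECLASS_TYPE_CLASS
 *   class << obj; end  -> VM_DEFINECLASS_TYPE_SINGLETON_CLASS (cbase is obj)
 *   module M; end      -> VM_DEFINECLASS_TYPE_MODULE
 */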
5865
5866static rb_method_visibility_t
5867vm_scope_visibility_get(const rb_execution_context_t *ec)
5868{
5869 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5870
5871 if (!vm_env_cref_by_cref(cfp->ep)) {
5872 return METHOD_VISI_PUBLIC;
5873 }
5874 else {
5875 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5876 }
5877}
5878
5879static int
5880vm_scope_module_func_check(const rb_execution_context_t *ec)
5881{
5882 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5883
5884 if (!vm_env_cref_by_cref(cfp->ep)) {
5885 return FALSE;
5886 }
5887 else {
5888 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5889 }
5890}
5891
5892static void
5893vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5894{
5895 VALUE klass;
5896 rb_method_visibility_t visi;
5897 rb_cref_t *cref = vm_ec_cref(ec);
5898
5899 if (is_singleton) {
5900 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5901 visi = METHOD_VISI_PUBLIC;
5902 }
5903 else {
5904 klass = CREF_CLASS_FOR_DEFINITION(cref);
5905 visi = vm_scope_visibility_get(ec);
5906 }
5907
5908 if (NIL_P(klass)) {
5909 rb_raise(rb_eTypeError, "no class/module to add method");
5910 }
5911
5912 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
 5913 // Set max_iv_count on klasses based on the number of ivar sets in the initialize method
5914 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5915
5916 RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
5917 }
5918
5919 if (!is_singleton && vm_scope_module_func_check(ec)) {
5920 klass = rb_singleton_class(klass);
5921 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5922 }
5923}
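/* Illustration (editorial): the module_function branch above is why a
 * single `def` can install two methods under that visibility mode:
 *
 *   module M
 *     module_function
 *     def helper; end   # private instance method M#helper
 *   end                 # plus public singleton method M.helper
 */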
5924
5925static VALUE
5926vm_invokeblock_i(struct rb_execution_context_struct *ec,
5927 struct rb_control_frame_struct *reg_cfp,
5928 struct rb_calling_info *calling)
5929{
5930 const struct rb_callinfo *ci = calling->cd->ci;
5931 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5932
5933 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5934 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5935 }
5936 else {
5937 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5938 }
5939}
5940
5941enum method_explorer_type {
5942 mexp_search_method,
5943 mexp_search_invokeblock,
5944 mexp_search_super,
5945};
5946
5947static inline VALUE
5948vm_sendish(
5949 struct rb_execution_context_struct *ec,
5950 struct rb_control_frame_struct *reg_cfp,
5951 struct rb_call_data *cd,
5952 VALUE block_handler,
5953 enum method_explorer_type method_explorer
5954) {
5955 VALUE val = Qundef;
5956 const struct rb_callinfo *ci = cd->ci;
5957 const struct rb_callcache *cc;
5958 int argc = vm_ci_argc(ci);
5959 VALUE recv = TOPN(argc);
5960 struct rb_calling_info calling = {
5961 .block_handler = block_handler,
5962 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5963 .recv = recv,
5964 .argc = argc,
5965 .cd = cd,
5966 };
5967
5968 switch (method_explorer) {
5969 case mexp_search_method:
5970 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5971 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5972 break;
5973 case mexp_search_super:
5974 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5975 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5976 break;
5977 case mexp_search_invokeblock:
5978 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5979 break;
5980 }
5981 return val;
5982}
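/* Note (editorial): the explorer modes correspond to the calling
 * instructions, roughly:
 *
 *   send / opt_send_without_block -> mexp_search_method
 *   invokesuper                   -> mexp_search_super
 *   invokeblock                   -> mexp_search_invokeblock
 *
 * Only the first two consult an inline call cache (calling.cc).
 */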
5983
5984VALUE
5985rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5986{
5987 stack_check(ec);
5988
5989 struct rb_forwarding_call_data adjusted_cd;
5990 struct rb_callinfo adjusted_ci;
5991
5992 VALUE bh;
5993 VALUE val;
5994
5995 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5996 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5997
5998 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5999
6000 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6001 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6002 }
6003 }
6004 else {
6005 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6006 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6007 }
6008
6009 VM_EXEC(ec, val);
6010 return val;
6011}
6012
6013VALUE
6014rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6015{
6016 stack_check(ec);
6017 VALUE bh = VM_BLOCK_HANDLER_NONE;
6018 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6019 VM_EXEC(ec, val);
6020 return val;
6021}
6022
6023VALUE
6024rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6025{
6026 stack_check(ec);
6027 struct rb_forwarding_call_data adjusted_cd;
6028 struct rb_callinfo adjusted_ci;
6029
6030 VALUE bh;
6031 VALUE val;
6032
6033 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6034 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6035
6036 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6037
6038 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6039 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6040 }
6041 }
6042 else {
6043 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6044 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6045 }
6046
6047 VM_EXEC(ec, val);
6048 return val;
6049}
6050
6051VALUE
6052rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6053{
6054 stack_check(ec);
6055 VALUE bh = VM_BLOCK_HANDLER_NONE;
6056 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6057 VM_EXEC(ec, val);
6058 return val;
6059}
6060
6061/* object.c */
6062VALUE rb_nil_to_s(VALUE);
6063VALUE rb_true_to_s(VALUE);
6064VALUE rb_false_to_s(VALUE);
6065/* numeric.c */
6066VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6067VALUE rb_fix_to_s(VALUE);
6068/* variable.c */
6069VALUE rb_mod_to_s(VALUE);
 6070 VALUE rb_mod_name(VALUE);
 6071
6072static VALUE
6073vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6074{
6075 int type = TYPE(recv);
6076 if (type == T_STRING) {
6077 return recv;
6078 }
6079
6080 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6081
6082 switch (type) {
6083 case T_SYMBOL:
6084 if (check_method_basic_definition(vm_cc_cme(cc))) {
6085 // rb_sym_to_s() allocates a mutable string, but since we are only
6086 // going to use this string for interpolation, it's fine to use the
6087 // frozen string.
6088 return rb_sym2str(recv);
6089 }
6090 break;
6091 case T_MODULE:
6092 case T_CLASS:
6093 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6094 // rb_mod_to_s() allocates a mutable string, but since we are only
6095 // going to use this string for interpolation, it's fine to use the
6096 // frozen string.
6097 VALUE val = rb_mod_name(recv);
6098 if (NIL_P(val)) {
6099 val = rb_mod_to_s(recv);
6100 }
6101 return val;
6102 }
6103 break;
6104 case T_NIL:
6105 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6106 return rb_nil_to_s(recv);
6107 }
6108 break;
6109 case T_TRUE:
6110 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6111 return rb_true_to_s(recv);
6112 }
6113 break;
6114 case T_FALSE:
6115 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6116 return rb_false_to_s(recv);
6117 }
6118 break;
6119 case T_FIXNUM:
6120 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6121 return rb_fix_to_s(recv);
6122 }
6123 break;
6124 }
6125 return Qundef;
6126}
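/* Illustration (editorial): vm_objtostring() is the fast path of the
 * objtostring insn used by string interpolation. When the receiver's
 * #to_s is known to still be the default implementation, the conversion
 * happens here without dispatching a method call:
 *
 *   "#{:sym} #{nil} #{42}"   # each piece can be resolved above
 */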
6127
6128static VALUE
6129vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6130{
6131 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6132 return ary;
6133 }
6134 else {
6135 return Qundef;
6136 }
6137}
6138
6139static VALUE
6140vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6141{
6142 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6143 return hash;
6144 }
6145 else {
6146 return Qundef;
6147 }
6148}
6149
6150static VALUE
6151vm_opt_str_freeze(VALUE str, int bop, ID id)
6152{
6153 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6154 return str;
6155 }
6156 else {
6157 return Qundef;
6158 }
6159}
6160
6161/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6162#define id_cmp idCmp
6163
6164static VALUE
6165vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6166{
6167 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6168 return rb_ary_includes(ary, target);
6169 }
6170 else {
6171 VALUE args[1] = {target};
6172
6173 // duparray
6174 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6175 VALUE dupary = rb_ary_resurrect(ary);
6176
6177 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6178 }
6179}
6180
6181VALUE
6182rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6183{
6184 return vm_opt_duparray_include_p(ec, ary, target);
6185}
6186
6187static VALUE
6188vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6189{
6190 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6191 if (num == 0) {
6192 return Qnil;
6193 }
6194 else {
6195 VALUE result = *ptr;
6196 rb_snum_t i = num - 1;
6197 while (i-- > 0) {
6198 const VALUE v = *++ptr;
6199 if (OPTIMIZED_CMP(v, result) > 0) {
6200 result = v;
6201 }
6202 }
6203 return result;
6204 }
6205 }
6206 else {
6207 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6208 }
6209}
6210
6211VALUE
6212rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6213{
6214 return vm_opt_newarray_max(ec, num, ptr);
6215}
6216
6217static VALUE
6218vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6219{
6220 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6221 if (num == 0) {
6222 return Qnil;
6223 }
6224 else {
6225 VALUE result = *ptr;
6226 rb_snum_t i = num - 1;
6227 while (i-- > 0) {
6228 const VALUE v = *++ptr;
6229 if (OPTIMIZED_CMP(v, result) < 0) {
6230 result = v;
6231 }
6232 }
6233 return result;
6234 }
6235 }
6236 else {
6237 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6238 }
6239}
6240
6241VALUE
6242rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6243{
6244 return vm_opt_newarray_min(ec, num, ptr);
6245}
6246
6247static VALUE
6248vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6249{
6250 // If Array#hash is _not_ monkeypatched, use the optimized call
6251 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6252 return rb_ary_hash_values(num, ptr);
6253 }
6254 else {
6255 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6256 }
6257}
6258
6259VALUE
6260rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6261{
6262 return vm_opt_newarray_hash(ec, num, ptr);
6263}
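/* Note (editorial): the vm_opt_newarray_* helpers back the
 * opt_newarray_send insn: calls such as
 *
 *   [a, b, c].max
 *   [a, b].hash
 *   [a, b].include?(x)
 *
 * are computed directly from the stacked operands, skipping the Array
 * allocation, as long as the corresponding Array method is unredefined.
 */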
6264
6265VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6266VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6267
6268static VALUE
6269vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6270{
6271 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6272 struct RArray fake_ary;
6273 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6274 return rb_ary_includes(ary, target);
6275 }
6276 else {
6277 VALUE args[1] = {target};
6278 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6279 }
6280}
6281
6282VALUE
6283rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6284{
6285 return vm_opt_newarray_include_p(ec, num, ptr, target);
6286}
6287
6288static VALUE
6289vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6290{
6291 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6292 struct RArray fake_ary;
6293 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6294 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6295 }
6296 else {
 6297 // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
 6298 // Set up an array with room for the keyword hash.
6299 VALUE args[2];
6300 args[0] = fmt;
6301 int kw_splat = RB_NO_KEYWORDS;
6302 int argc = 1;
6303
6304 if (!UNDEF_P(buffer)) {
6305 args[1] = rb_hash_new_with_size(1);
6306 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6307 kw_splat = RB_PASS_KEYWORDS;
6308 argc++;
6309 }
6310
6311 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6312 }
6313}
6314
6315VALUE
6316rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6317{
6318 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6319}
6320
6321VALUE
6322rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6323{
6324 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6325}
6326
6327#undef id_cmp
6328
6329static void
6330vm_track_constant_cache(ID id, void *ic)
6331{
6332 rb_vm_t *vm = GET_VM();
6333 struct rb_id_table *const_cache = vm->constant_cache;
6334 VALUE lookup_result;
6335 st_table *ics;
6336
6337 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6338 ics = (st_table *)lookup_result;
6339 }
6340 else {
6341 ics = st_init_numtable();
6342 rb_id_table_insert(const_cache, id, (VALUE)ics);
6343 }
6344
 6345 /* The call below to st_insert could allocate, which could trigger a GC.
 6346 * If it triggers a GC, it may free an iseq that also holds a cache to this
 6347 * constant. If that iseq is the last iseq with a cache to this constant, then
 6348 * it will free this ST table, which would cause a use-after-free during this
6349 * st_insert.
6350 *
6351 * So to fix this issue, we store the ID that is currently being inserted
6352 * and, in remove_from_constant_cache, we don't free the ST table for ID
6353 * equal to this one.
6354 *
6355 * See [Bug #20921].
6356 */
6357 vm->inserting_constant_cache_id = id;
6358
6359 st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
6360
6361 vm->inserting_constant_cache_id = (ID)0;
6362}
6363
6364static void
6365vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6366{
6367 RB_VM_LOCK_ENTER();
6368
6369 for (int i = 0; segments[i]; i++) {
6370 ID id = segments[i];
6371 if (id == idNULL) continue;
6372 vm_track_constant_cache(id, ic);
6373 }
6374
6375 RB_VM_LOCK_LEAVE();
6376}
6377
6378// For JIT inlining
6379static inline bool
6380vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6381{
6382 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6383 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6384
6385 return (ic_cref == NULL || // no need to check CREF
6386 ic_cref == vm_get_cref(reg_ep));
6387 }
6388 return false;
6389}
6390
6391static bool
6392vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6393{
6394 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6395 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6396}
6397
6398// YJIT needs this function to never allocate and never raise
6399bool
6400rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6401{
6402 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6403}
6404
6405static void
6406vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6407{
6408 if (ruby_vm_const_missing_count > 0) {
6409 ruby_vm_const_missing_count = 0;
6410 ic->entry = NULL;
6411 return;
6412 }
6413
6414 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6415 RB_OBJ_WRITE(ice, &ice->value, val);
6416 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6417 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6418 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6419
6420 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6421 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6422 rb_yjit_constant_ic_update(iseq, ic, pos);
6423}
6424
6425VALUE
6426rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6427{
6428 VALUE val;
6429 const ID *segments = ic->segments;
6430 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6431 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6432 val = ice->value;
6433
6434 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6435 }
6436 else {
6437 ruby_vm_constant_cache_misses++;
6438 val = vm_get_ev_const_chain(ec, segments);
6439 vm_ic_track_const_chain(GET_CFP(), ic, segments);
 6440 // Undo the PC increment to get the address of this instruction
6441 // INSN_ATTR(width) == 2
6442 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6443 }
6444 return val;
6445}
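/* Illustration (editorial): opt_getconstant_path caches a constant chain
 * lookup such as:
 *
 *   def f = Foo::Bar   # ic->segments is [:Foo, :Bar]
 *
 * On a miss the chain is resolved, the IC is registered under every
 * segment ID (so redefining either Foo or Bar invalidates it), and later
 * runs revalidate the cached entry with vm_ic_hit_p().
 */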
6446
6447static VALUE
6448vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6449{
6450 rb_thread_t *th = rb_ec_thread_ptr(ec);
6451 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6452
6453 again:
6454 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6455 return is->once.value;
6456 }
6457 else if (is->once.running_thread == NULL) {
6458 VALUE val;
6459 is->once.running_thread = th;
6460 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6461 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6462 /* is->once.running_thread is cleared by vm_once_clear() */
6463 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6464 return val;
6465 }
6466 else if (is->once.running_thread == th) {
6467 /* recursive once */
6468 return vm_once_exec((VALUE)iseq);
6469 }
6470 else {
6471 /* waiting for finish */
6472 RUBY_VM_CHECK_INTS(ec);
 6473 rb_thread_schedule();
 6474 goto again;
6475 }
6476}
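/* Illustration (editorial): the `once` insn memoizes its body per inline
 * storage entry, which is what gives //o its evaluate-once semantics:
 *
 *   def pat(s) = /#{s}/o   # the interpolation runs on the first call
 *                          # only; later calls reuse is->once.value
 *
 * RUNNING_THREAD_ONCE_DONE marks completion; other threads loop in the
 * "waiting for finish" branch until the running thread publishes it.
 */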
6477
6478static OFFSET
6479vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6480{
6481 switch (OBJ_BUILTIN_TYPE(key)) {
6482 case -1:
6483 case T_FLOAT:
6484 case T_SYMBOL:
6485 case T_BIGNUM:
6486 case T_STRING:
6487 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6488 SYMBOL_REDEFINED_OP_FLAG |
6489 INTEGER_REDEFINED_OP_FLAG |
6490 FLOAT_REDEFINED_OP_FLAG |
6491 NIL_REDEFINED_OP_FLAG |
6492 TRUE_REDEFINED_OP_FLAG |
6493 FALSE_REDEFINED_OP_FLAG |
6494 STRING_REDEFINED_OP_FLAG)) {
6495 st_data_t val;
6496 if (RB_FLOAT_TYPE_P(key)) {
6497 double kval = RFLOAT_VALUE(key);
6498 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6499 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6500 }
6501 }
6502 if (rb_hash_stlike_lookup(hash, key, &val)) {
6503 return FIX2LONG((VALUE)val);
6504 }
6505 else {
6506 return else_offset;
6507 }
6508 }
6509 }
6510 return 0;
6511}
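/* Illustration (editorial): integral Floats are folded to Integer keys so
 * that CDHASH dispatch agrees with Integer#== semantics:
 *
 *   case 1.0
 *   when 1 then :int   # the CDHASH key is 1; 1.0 normalizes to 1
 *   end
 *
 * Returning 0 makes the insn fall back to the ordinary sequence of #===
 * checks.
 */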
6512
6513NORETURN(static void
6514 vm_stack_consistency_error(const rb_execution_context_t *ec,
6515 const rb_control_frame_t *,
6516 const VALUE *));
6517static void
6518vm_stack_consistency_error(const rb_execution_context_t *ec,
6519 const rb_control_frame_t *cfp,
6520 const VALUE *bp)
6521{
6522 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6523 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6524 static const char stack_consistency_error[] =
6525 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6526#if defined RUBY_DEVEL
6527 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6528 rb_str_cat_cstr(mesg, "\n");
6529 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
 6530 rb_bug("%s", RSTRING_PTR(mesg));
 6531#else
6532 rb_bug(stack_consistency_error, nsp, nbp);
6533#endif
6534}
6535
6536static VALUE
6537vm_opt_plus(VALUE recv, VALUE obj)
6538{
6539 if (FIXNUM_2_P(recv, obj) &&
6540 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6541 return rb_fix_plus_fix(recv, obj);
6542 }
6543 else if (FLONUM_2_P(recv, obj) &&
6544 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6545 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6546 }
6547 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6548 return Qundef;
6549 }
6550 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6551 RBASIC_CLASS(obj) == rb_cFloat &&
6552 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6553 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6554 }
6555 else if (RBASIC_CLASS(recv) == rb_cString &&
6556 RBASIC_CLASS(obj) == rb_cString &&
6557 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6558 return rb_str_opt_plus(recv, obj);
6559 }
6560 else if (RBASIC_CLASS(recv) == rb_cArray &&
6561 RBASIC_CLASS(obj) == rb_cArray &&
6562 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6563 return rb_ary_plus(recv, obj);
6564 }
6565 else {
6566 return Qundef;
6567 }
6568}
6569
6570static VALUE
6571vm_opt_minus(VALUE recv, VALUE obj)
6572{
6573 if (FIXNUM_2_P(recv, obj) &&
6574 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6575 return rb_fix_minus_fix(recv, obj);
6576 }
6577 else if (FLONUM_2_P(recv, obj) &&
6578 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6579 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6580 }
6581 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6582 return Qundef;
6583 }
6584 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6585 RBASIC_CLASS(obj) == rb_cFloat &&
6586 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6587 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6588 }
6589 else {
6590 return Qundef;
6591 }
6592}
6593
6594static VALUE
6595vm_opt_mult(VALUE recv, VALUE obj)
6596{
6597 if (FIXNUM_2_P(recv, obj) &&
6598 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6599 return rb_fix_mul_fix(recv, obj);
6600 }
6601 else if (FLONUM_2_P(recv, obj) &&
6602 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6603 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6604 }
6605 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6606 return Qundef;
6607 }
6608 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6609 RBASIC_CLASS(obj) == rb_cFloat &&
6610 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6611 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6612 }
6613 else {
6614 return Qundef;
6615 }
6616}
6617
6618static VALUE
6619vm_opt_div(VALUE recv, VALUE obj)
6620{
6621 if (FIXNUM_2_P(recv, obj) &&
6622 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6623 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6624 }
6625 else if (FLONUM_2_P(recv, obj) &&
6626 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6627 return rb_flo_div_flo(recv, obj);
6628 }
6629 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6630 return Qundef;
6631 }
6632 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6633 RBASIC_CLASS(obj) == rb_cFloat &&
6634 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6635 return rb_flo_div_flo(recv, obj);
6636 }
6637 else {
6638 return Qundef;
6639 }
6640}
6641
6642static VALUE
6643vm_opt_mod(VALUE recv, VALUE obj)
6644{
6645 if (FIXNUM_2_P(recv, obj) &&
6646 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6647 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6648 }
6649 else if (FLONUM_2_P(recv, obj) &&
6650 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6651 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6652 }
6653 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6654 return Qundef;
6655 }
6656 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6657 RBASIC_CLASS(obj) == rb_cFloat &&
6658 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6659 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6660 }
6661 else {
6662 return Qundef;
6663 }
6664}
6665
6666static VALUE
6667vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6668{
6669 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6670 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6671
6672 if (!UNDEF_P(val)) {
6673 return RBOOL(!RTEST(val));
6674 }
6675 }
6676
6677 return Qundef;
6678}
6679
6680static VALUE
6681vm_opt_lt(VALUE recv, VALUE obj)
6682{
6683 if (FIXNUM_2_P(recv, obj) &&
6684 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6685 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6686 }
6687 else if (FLONUM_2_P(recv, obj) &&
6688 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6689 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6690 }
6691 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6692 return Qundef;
6693 }
6694 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6695 RBASIC_CLASS(obj) == rb_cFloat &&
6696 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6697 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6698 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6699 }
6700 else {
6701 return Qundef;
6702 }
6703}
6704
6705static VALUE
6706vm_opt_le(VALUE recv, VALUE obj)
6707{
6708 if (FIXNUM_2_P(recv, obj) &&
6709 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6710 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6711 }
6712 else if (FLONUM_2_P(recv, obj) &&
6713 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6714 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6715 }
6716 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6717 return Qundef;
6718 }
6719 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6720 RBASIC_CLASS(obj) == rb_cFloat &&
6721 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6722 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6723 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6724 }
6725 else {
6726 return Qundef;
6727 }
6728}
6729
6730static VALUE
6731vm_opt_gt(VALUE recv, VALUE obj)
6732{
6733 if (FIXNUM_2_P(recv, obj) &&
6734 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6735 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6736 }
6737 else if (FLONUM_2_P(recv, obj) &&
6738 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6739 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6740 }
6741 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6742 return Qundef;
6743 }
6744 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6745 RBASIC_CLASS(obj) == rb_cFloat &&
6746 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6747 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6748 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6749 }
6750 else {
6751 return Qundef;
6752 }
6753}
6754
6755static VALUE
6756vm_opt_ge(VALUE recv, VALUE obj)
6757{
6758 if (FIXNUM_2_P(recv, obj) &&
6759 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6760 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6761 }
6762 else if (FLONUM_2_P(recv, obj) &&
6763 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6764 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6765 }
6766 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6767 return Qundef;
6768 }
6769 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6770 RBASIC_CLASS(obj) == rb_cFloat &&
6771 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6772 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6773 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6774 }
6775 else {
6776 return Qundef;
6777 }
6778}
6779
6780
6781static VALUE
6782vm_opt_ltlt(VALUE recv, VALUE obj)
6783{
6784 if (SPECIAL_CONST_P(recv)) {
6785 return Qundef;
6786 }
6787 else if (RBASIC_CLASS(recv) == rb_cString &&
6788 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6789 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6790 return rb_str_buf_append(recv, obj);
6791 }
6792 else {
6793 return rb_str_concat(recv, obj);
6794 }
6795 }
6796 else if (RBASIC_CLASS(recv) == rb_cArray &&
6797 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6798 return rb_ary_push(recv, obj);
6799 }
6800 else {
6801 return Qundef;
6802 }
6803}
6804
6805static VALUE
6806vm_opt_and(VALUE recv, VALUE obj)
6807{
6808 // If recv and obj are both fixnums, then the bottom tag bit
6809 // will be 1 on both. 1 & 1 == 1, so the result value will also
6810 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6811 // will be 0, and we return Qundef.
6812 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6813
6814 if (FIXNUM_P(ret) &&
6815 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6816 return ret;
6817 }
6818 else {
6819 return Qundef;
6820 }
6821}
6822
6823static VALUE
6824vm_opt_or(VALUE recv, VALUE obj)
6825{
6826 if (FIXNUM_2_P(recv, obj) &&
6827 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6828 return recv | obj;
6829 }
6830 else {
6831 return Qundef;
6832 }
6833}
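/* Worked example (editorial): a Fixnum n is encoded as (n << 1) | 1, so
 * for two fixnums the bitwise identities
 *
 *   enc(a) & enc(b) == ((a & b) << 1) | 1 == enc(a & b)
 *   enc(a) | enc(b) == ((a | b) << 1) | 1 == enc(a | b)
 *
 * hold, which is why vm_opt_and() and vm_opt_or() can return the raw
 * machine AND/OR of the two VALUEs without decoding them.
 */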
6834
6835static VALUE
6836vm_opt_aref(VALUE recv, VALUE obj)
6837{
6838 if (SPECIAL_CONST_P(recv)) {
6839 if (FIXNUM_2_P(recv, obj) &&
6840 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6841 return rb_fix_aref(recv, obj);
6842 }
6843 return Qundef;
6844 }
6845 else if (RBASIC_CLASS(recv) == rb_cArray &&
6846 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6847 if (FIXNUM_P(obj)) {
6848 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6849 }
6850 else {
6851 return rb_ary_aref1(recv, obj);
6852 }
6853 }
6854 else if (RBASIC_CLASS(recv) == rb_cHash &&
6855 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6856 return rb_hash_aref(recv, obj);
6857 }
6858 else {
6859 return Qundef;
6860 }
6861}
6862
6863static VALUE
6864vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6865{
6866 if (SPECIAL_CONST_P(recv)) {
6867 return Qundef;
6868 }
6869 else if (RBASIC_CLASS(recv) == rb_cArray &&
6870 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6871 FIXNUM_P(obj)) {
6872 rb_ary_store(recv, FIX2LONG(obj), set);
6873 return set;
6874 }
6875 else if (RBASIC_CLASS(recv) == rb_cHash &&
6876 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6877 rb_hash_aset(recv, obj, set);
6878 return set;
6879 }
6880 else {
6881 return Qundef;
6882 }
6883}
6884
6885static VALUE
6886vm_opt_aref_with(VALUE recv, VALUE key)
6887{
6888 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6889 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6890 rb_hash_compare_by_id_p(recv) == Qfalse &&
6891 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6892 return rb_hash_aref(recv, key);
6893 }
6894 else {
6895 return Qundef;
6896 }
6897}
6898
6899VALUE
6900rb_vm_opt_aref_with(VALUE recv, VALUE key)
6901{
6902 return vm_opt_aref_with(recv, key);
6903}
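/* Note (editorial): opt_aref_with serves hash["literal"] without
 * allocating the key string. It bails out for compare_by_identity hashes
 * (identity comparison against the shared literal would differ from a
 * freshly allocated key) and for hashes with a default proc (the proc
 * would observe the shared frozen key).
 */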
6904
6905static VALUE
6906vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6907{
6908 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6909 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6910 rb_hash_compare_by_id_p(recv) == Qfalse) {
6911 return rb_hash_aset(recv, key, val);
6912 }
6913 else {
6914 return Qundef;
6915 }
6916}
6917
6918static VALUE
6919vm_opt_length(VALUE recv, int bop)
6920{
6921 if (SPECIAL_CONST_P(recv)) {
6922 return Qundef;
6923 }
6924 else if (RBASIC_CLASS(recv) == rb_cString &&
6925 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6926 if (bop == BOP_EMPTY_P) {
6927 return LONG2NUM(RSTRING_LEN(recv));
6928 }
6929 else {
6930 return rb_str_length(recv);
6931 }
6932 }
6933 else if (RBASIC_CLASS(recv) == rb_cArray &&
6934 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6935 return LONG2NUM(RARRAY_LEN(recv));
6936 }
6937 else if (RBASIC_CLASS(recv) == rb_cHash &&
6938 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6939 return INT2FIX(RHASH_SIZE(recv));
6940 }
6941 else {
6942 return Qundef;
6943 }
6944}
6945
6946static VALUE
6947vm_opt_empty_p(VALUE recv)
6948{
6949 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6950 case Qundef: return Qundef;
6951 case INT2FIX(0): return Qtrue;
6952 default: return Qfalse;
6953 }
6954}
6955
6956VALUE rb_false(VALUE obj);
6957
6958static VALUE
6959vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6960{
6961 if (NIL_P(recv) &&
6962 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6963 return Qtrue;
6964 }
6965 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6966 return Qfalse;
6967 }
6968 else {
6969 return Qundef;
6970 }
6971}
6972
6973static VALUE
6974fix_succ(VALUE x)
6975{
6976 switch (x) {
6977 case ~0UL:
6978 /* 0xFFFF_FFFF == INT2FIX(-1)
6979 * `-1.succ` is of course 0. */
6980 return INT2FIX(0);
6981 case RSHIFT(~0UL, 1):
6982 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6983 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6984 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6985 default:
6986 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6987 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6988 * == lx*2 + ly*2 + 1
6989 * == (lx*2+1) + (ly*2+1) - 1
6990 * == x + y - 1
6991 *
6992 * Here, if we put y := INT2FIX(1):
6993 *
6994 * == x + INT2FIX(1) - 1
6995 * == x + 2 .
6996 */
6997 return x + 2;
6998 }
6999}
7000
7001static VALUE
7002vm_opt_succ(VALUE recv)
7003{
7004 if (FIXNUM_P(recv) &&
7005 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7006 return fix_succ(recv);
7007 }
7008 else if (SPECIAL_CONST_P(recv)) {
7009 return Qundef;
7010 }
7011 else if (RBASIC_CLASS(recv) == rb_cString &&
7012 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7013 return rb_str_succ(recv);
7014 }
7015 else {
7016 return Qundef;
7017 }
7018}
7019
7020static VALUE
7021vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7022{
7023 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7024 return RBOOL(!RTEST(recv));
7025 }
7026 else {
7027 return Qundef;
7028 }
7029}
7030
7031static VALUE
7032vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7033{
7034 if (SPECIAL_CONST_P(recv)) {
7035 return Qundef;
7036 }
7037 else if (RBASIC_CLASS(recv) == rb_cString &&
7038 CLASS_OF(obj) == rb_cRegexp &&
7039 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7040 return rb_reg_match(obj, recv);
7041 }
7042 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7043 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7044 return rb_reg_match(recv, obj);
7045 }
7046 else {
7047 return Qundef;
7048 }
7049}
7050
7051rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7052
7053NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7054
7055static inline void
7056vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7057 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7058 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7059{
7060 rb_event_flag_t event = pc_events & target_event;
7061 VALUE self = GET_SELF();
7062
7063 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7064
7065 if (event & global_hooks->events) {
7066 /* increment PC because source line is calculated with PC-1 */
7067 reg_cfp->pc++;
7068 vm_dtrace(event, ec);
7069 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7070 reg_cfp->pc--;
7071 }
7072
 7073 // Load here since the global hook above can add and free local hooks
7074 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7075 if (local_hooks != NULL) {
7076 if (event & local_hooks->events) {
7077 /* increment PC because source line is calculated with PC-1 */
7078 reg_cfp->pc++;
7079 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7080 reg_cfp->pc--;
7081 }
7082 }
7083}
7084
7085#define VM_TRACE_HOOK(target_event, val) do { \
7086 if ((pc_events & (target_event)) & enabled_flags) { \
7087 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7088 } \
7089} while (0)
7090
7091static VALUE
7092rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7093{
7094 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7095 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7096 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7097}
7098
7099static void
7100vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7101{
7102 const VALUE *pc = reg_cfp->pc;
7103 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7104 rb_event_flag_t global_events = enabled_flags;
7105
7106 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7107 return;
7108 }
7109 else {
7110 const rb_iseq_t *iseq = reg_cfp->iseq;
7111 VALUE iseq_val = (VALUE)iseq;
7112 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7113 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7114 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7115 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7116 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7117 rb_hook_list_t *bmethod_local_hooks = NULL;
7118 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7119 rb_event_flag_t bmethod_local_events = 0;
7120 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7121 enabled_flags |= iseq_local_events;
7122
7123 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7124
7125 if (bmethod_frame) {
7126 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7127 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7128 bmethod_local_hooks = me->def->body.bmethod.hooks;
7129 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7130 if (bmethod_local_hooks) {
7131 bmethod_local_events = bmethod_local_hooks->events;
7132 }
7133 }
7134
7135
7136 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7137#if 0
7138 /* disable trace */
7139 /* TODO: incomplete */
7140 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7141#else
7142 /* do not disable trace because of performance problem
7143 * (re-enable overhead)
7144 */
7145#endif
7146 return;
7147 }
7148 else if (ec->trace_arg != NULL) {
7149 /* already tracing */
7150 return;
7151 }
7152 else {
7153 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7154 /* Note, not considering iseq local events here since the same
7155 * iseq could be used in multiple bmethods. */
7156 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7157
7158 if (0) {
7159 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7160 (int)pos,
7161 (int)pc_events,
7162 RSTRING_PTR(rb_iseq_path(iseq)),
7163 (int)rb_iseq_line_no(iseq, pos),
7164 RSTRING_PTR(rb_iseq_label(iseq)));
7165 }
7166 VM_ASSERT(reg_cfp->pc == pc);
7167 VM_ASSERT(pc_events != 0);
7168
7169 /* check traces */
7170 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7171 /* b_call instruction running as a method. Fire call event. */
7172 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7173 }
 7174 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
 7175 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7176 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7177 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7178 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7179 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7180 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7181 /* b_return instruction running as a method. Fire return event. */
7182 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7183 }
7184
7185 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7186 // We need the pointer to stay valid in case compaction happens in a trace hook.
7187 //
7188 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7189 // storage for `rb_method_definition_t` is not on the GC heap.
7190 RB_GC_GUARD(iseq_val);
7191 }
7192 }
7193}
7194#undef VM_TRACE_HOOK
7195
7196#if VM_CHECK_MODE > 0
7197NORETURN( NOINLINE( COLDFUNC
7198void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7199
7200void
7201Init_vm_stack_canary(void)
7202{
7203 /* This has to be called _after_ our PRNG is properly set up. */
7204 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7205 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7206
7207 vm_stack_canary_was_born = true;
7208 VM_ASSERT(n == 0);
7209}
7210
7211void
7212rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7213{
7214 /* Because a method has already been called, why not call
7215 * another one. */
7216 const char *insn = rb_insns_name(i);
7217 VALUE inspection = rb_inspect(c);
7218 const char *str = StringValueCStr(inspection);
7219
7220 rb_bug("dead canary found at %s: %s", insn, str);
7221}
7222
7223#else
7224void Init_vm_stack_canary(void) { /* nothing to do */ }
7225#endif
7226
7227
7228/* a part of the following code is generated by this ruby script:
7229
723016.times{|i|
7231 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7232 typedef_args.prepend(", ") if i != 0
7233 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7234 call_args.prepend(", ") if i != 0
7235 puts %Q{
7236static VALUE
7237builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7238{
7239 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7240 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7241}}
7242}
7243
7244puts
7245puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
724616.times{|i|
7247 puts " builtin_invoker#{i},"
7248}
7249puts "};"
7250*/
7251
7252static VALUE
7253builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7254{
7255 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7256 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7257}
7258
7259static VALUE
7260builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7261{
7262 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7263 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7264}
7265
7266static VALUE
7267builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7268{
7269 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7270 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7271}
7272
7273static VALUE
7274builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7275{
7276 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7277 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7278}
7279
7280static VALUE
7281builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7282{
7283 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7284 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7285}
7286
7287static VALUE
7288builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7289{
7290 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7291 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7292}
7293
7294static VALUE
7295builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7296{
7297 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7298 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7299}
7300
7301static VALUE
7302builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7303{
7304 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7305 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7306}
7307
7308static VALUE
7309builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7310{
7311 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7312 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7313}
7314
7315static VALUE
7316builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7317{
7318 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7319 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7320}
7321
7322static VALUE
7323builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7324{
7325 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7326 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7327}
7328
7329static VALUE
7330builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7331{
7332 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7333 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7334}
7335
7336static VALUE
7337builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7338{
7339 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7340 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7341}
7342
7343static VALUE
7344builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7345{
7346 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7347 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7348}
7349
7350static VALUE
7351builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7352{
7353 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7354 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7355}
7356
7357static VALUE
7358builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7359{
7360 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7361 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7362}
7363
7364typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7365
7366static builtin_invoker
7367lookup_builtin_invoker(int argc)
7368{
7369 static const builtin_invoker invokers[] = {
7370 builtin_invoker0,
7371 builtin_invoker1,
7372 builtin_invoker2,
7373 builtin_invoker3,
7374 builtin_invoker4,
7375 builtin_invoker5,
7376 builtin_invoker6,
7377 builtin_invoker7,
7378 builtin_invoker8,
7379 builtin_invoker9,
7380 builtin_invoker10,
7381 builtin_invoker11,
7382 builtin_invoker12,
7383 builtin_invoker13,
7384 builtin_invoker14,
7385 builtin_invoker15,
7386 };
7387
7388 return invokers[argc];
7389}
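/* The sixteen invoker stubs above exist because C has no portable way to
 * call a function pointer with a runtime-variable number of arguments:
 * each arity from 0 to 15 gets its own trampoline that unpacks argv into
 * positional parameters, and lookup_builtin_invoker() is a plain table
 * dispatch on that arity. A minimal sketch of the calling side, assuming
 * a hypothetical two-argument builtin `func`:
 *
 *   VALUE args[2] = {a, b};
 *   builtin_invoker inv = lookup_builtin_invoker(2); // builtin_invoker2
 *   VALUE ret = (*inv)(ec, self, args, (rb_insn_func_t)(uintptr_t)func);
 *
 * Note that argc is trusted: invokers[] has no bounds check, so callers
 * are expected to pass 0 <= argc <= 15.
 */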
7390
7391static inline VALUE
7392invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7393{
7394 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7395 SETUP_CANARY(canary_p);
7396 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7397 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7398 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7399 return ret;
7400}
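/* Roughly, SETUP_CANARY() writes a known sentinel value onto the VM stack
 * before the call and CHECK_CANARY() verifies it is intact afterwards. A
 * builtin annotated with `Primitive.attr! :leaf` must not grow the VM
 * stack (for example by calling back into Ruby), so a clobbered canary
 * means the :leaf annotation was wrong, and the VM reports the dead
 * canary instead of continuing with a corrupted stack.
 */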
7401
7402static VALUE
7403vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7404{
7405 return invoke_bf(ec, cfp, bf, argv);
7406}
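/* vm_invoke_builtin() serves the invokebuiltin instruction, which passes
 * arguments as an explicit argv; vm_invoke_builtin_delegate() below
 * serves the opt_invokebuiltin_delegate family, which forwards the
 * current frame's locals instead of copying them.
 */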
7407
7408static VALUE
7409vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7410{
7411 if (0) { // debug print
7412 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7413 for (int i=0; i<bf->argc; i++) {
7414 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7415 }
7416 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7417 (void *)(uintptr_t)bf->func_ptr);
7418 }
7419
7420 if (bf->argc == 0) {
7421 return invoke_bf(ec, cfp, bf, NULL);
7422 }
7423 else {
7424 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7425 return invoke_bf(ec, cfp, bf, argv);
7426 }
7427}
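/* The delegate form avoids materializing a fresh argv by pointing the
 * builtin directly at the frame's own locals. The pointer arithmetic
 * follows the frame layout: locals sit immediately below the
 * VM_ENV_DATA_SIZE environment slots that cfp->ep points into, so local
 * #i lives at ep - local_table_size - VM_ENV_DATA_SIZE + 1 + i, and
 * start_index selects the first local to forward.
 */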
7428
7429// for __builtin_inline!()
7430
7431VALUE
7432rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7433{
7434 const rb_control_frame_t *cfp = ec->cfp;
7435 return cfp->ep[index];
7436}
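/* rb_vm_lvar_exposed() lets C code generated from `__builtin_inline!`
 * read a local variable of the Ruby frame it was inlined into, addressed
 * by its offset from the environment pointer (negative for locals, per
 * the layout note above). A sketch of a hypothetical generated accessor,
 * assuming the compiler placed local `x` at ep offset -3:
 *
 *   VALUE x = rb_vm_lvar_exposed(ec, -3);
 *
 * The indirection through ec->cfp keeps the helper usable from code that
 * has no direct handle on the control frame.
 */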