Ruby 3.5.0dev (2025-07-13 revision f03cc0b514e400c4ea1428103e1f1f601c22fa86)
vm_insnhelper.c (f03cc0b514e400c4ea1428103e1f1f601c22fa86)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"
/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_of(exc));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
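/* A hedged summary of how `crit` steers the branches above (derived only
 * from this function, not from the enum's own documentation):
 *   crit >= rb_stack_overflow_fatal:  raise the pre-built "fatal" machine
 *     stack overflow exception without building a backtrace at all;
 *   crit >= rb_stack_overflow_signal: ec_stack_overflow(ec, FALSE), i.e.
 *     raise without capturing a backtrace (we may be on a signal stack);
 *   otherwise:                        ec_stack_overflow(ec, TRUE), which
 *     also captures a backtrace object, itself costing some stack.
 */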

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq)       // argument error
                  );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);
    /* rb_bug() is not capable of outputting such large contents.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
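/* A sketch of how the canary is used (the planting side lives in the insn
 * dispatch code, not in this file): before running an instruction declared
 * "leaf", the VM stores the canary in the slot just above the stack top.
 * A genuinely leaf insn never pushes a frame, so vm_push_frame() should
 * never see that value at sp[0]; when vm_check_canary() does see it, the
 * insn named in the report wrongly claimed leafness.  Roughly:
 *
 *     sp[0] = rb_vm_stack_canary();   // planted before a leaf insn
 *     ...insn body...                 // reaching vm_push_frame() here
 *                                     // makes rb_vm_check_canary() fire
 */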

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */
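    /* Resulting layout for the frame being pushed (sp now points one slot
     * past the FLAGS entry and ep == sp - 1):
     *
     *   local_0 .. local_{n-1}  Qnil-initialized locals
     *   cref_or_me              ep[VM_ENV_DATA_INDEX_ME_CREF]  (ep[-2])
     *   specval                 ep[VM_ENV_DATA_INDEX_SPECVAL]  (ep[-1])
     *   type (ENV_FLAGS)        ep[VM_ENV_DATA_INDEX_FLAGS]    (ep[-0])
     *
     * matching the STATIC_ASSERTs on the VM_ENV_DATA_INDEX_* constants
     * above. */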

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

// Push a pseudo frame whose file name is `fname`.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
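/* Example messages produced above, for argc == 1:
 *   min == max == 2            -> "wrong number of arguments (given 1, expected 2)"
 *   min == 2, max unlimited    -> "wrong number of arguments (given 1, expected 2+)"
 *   min == 2, max == 4         -> "wrong number of arguments (given 1, expected 2..4)"
 */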

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
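/* Informal Ruby-level mapping of the block handler types dispatched above:
 *   iseq   - a literal block:              foo { |x| x }
 *   ifunc  - an internal C-level block, e.g. one set up via rb_block_call()
 *   symbol - a Symbol turned into a block: foo(&:upcase)
 *   proc   - an existing Proc object:      foo(&pr)
 * The first two are captured blocks and are wrapped into a Proc here. */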

/* svar */

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
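/* The `type` encoding used above and in vm_backref_defined() below, as
 * implied by the switch: type == 0 selects an svar slot by `key` ($~, $_,
 * and friends); otherwise the low bit distinguishes named back-refs from
 * numbered ones:
 *   $&  ->  type == ('&' << 1) | 1        $1  ->  type == (1 << 1)
 *   $`  ->  type == ('`' << 1) | 1        $2  ->  type == (2 << 1)
 */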

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    const rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    #define ADD_NEW_CREF(new_cref) \
    if (new_cref_tail) { \
        RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
    } \
    else { \
        new_cref_head = new_cref; \
    } \
    new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
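/* Lexical constant search above, illustrated with an informal example:
 *
 *     class A
 *       X = 1
 *       module B
 *         def self.f; X; end   # cref chain: B -> A -> Object
 *       end
 *     end
 *
 * With orig_klass == Qnil and allow_nil, each cref class is probed via
 * rb_const_lookup() (triggering autoload when the entry is Qundef), then
 * the search falls back to the ancestry of the innermost cref class or of
 * the receiver's class.  With an explicit scope (e.g. A::X), only the
 * rb_public_const_*_from() path at the bottom runs. */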

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}


static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    VALUE val = Qundef;
    VALUE *ivar_list;

    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_FIELDS(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            if (!fields_obj) {
                return default_value;
            }
            ivar_list = rb_imemo_fields_ptr(fields_obj);
            shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);

            break;
        }
      default:
        if (rb_obj_exivar_p(obj)) {
            VALUE fields_obj = 0;
            if (!rb_gen_fields_tbl_get(obj, id, &fields_obj)) {
                return default_value;
            }
            ivar_list = rb_imemo_fields_ptr(fields_obj);
        }
        else {
            return default_value;
        }
    }

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (rb_shape_too_complex_p(shape_id)) {
            st_table *table = NULL;
            switch (BUILTIN_TYPE(obj)) {
              case T_CLASS:
              case T_MODULE:
                table = rb_imemo_fields_complex_tbl(fields_obj);
                break;

              case T_OBJECT:
                table = ROBJECT_FIELDS_HASH(obj);
                break;

              default: {
                VALUE fields_obj;
                if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
                    table = rb_imemo_fields_complex_tbl(fields_obj);
                }
                break;
              }
            }

            if (!table || !st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }

    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    RB_GC_GUARD(fields_obj);
    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

        if (!rb_shape_too_complex_p(next_shape_id)) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    VALUE fields_obj = 0;

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (shape_id == RSHAPE_PARENT(dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    rb_gen_fields_tbl_get(obj, 0, &fields_obj);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
    }

    RB_OBJ_WRITE(obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                shape_id_t source_shape_id = RSHAPE_PARENT(dest_shape_id);

                if (shape_id == source_shape_id && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(rb_shape_get_next_iv_shape(source_shape_id, id) == dest_shape_id);
                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
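/* Informal summary of the inline-cache states consumed by vm_setivar()
 * above: the cache holds (dest_shape_id, index).  Three outcomes:
 *   - shape_id == dest_shape_id: plain overwrite of an existing ivar slot;
 *   - shape_id is the parent of dest_shape_id and the shape edge is `id`
 *     (with equal capacity): the write adds the ivar, so the object first
 *     migrates to dest_shape_id, then stores at `index`;
 *   - anything else: Qundef is returned and the caller takes the slowpath,
 *     which does a generic ivar set and repopulates the cache. */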

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}
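/* `throw_state` packs two fields, as decoded above: the bits under
 * VM_THROW_STATE_MASK carry the tag (TAG_BREAK, TAG_RETRY, TAG_RETURN, or
 * 0 to continue an in-flight throw), and VM_THROW_NO_ESCAPE_FLAG marks
 * throws that need no escape-frame search.  E.g. `break` inside a block
 * compiles to a throw insn whose state is TAG_BREAK. */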

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

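/* vm_expandarray() below implements massign-style destructuring.  An
 * informal reading of the flag bits (0x01: a splat target is present,
 * 0x02: "post" mode, i.e. the splat precedes trailing targets):
 *
 *     a, b = ary      # num=2, flag=0
 *     a, *b = ary     # num=1, flag=0x01
 *     *a, b = ary     # num=1, flag=0x03
 *
 * Elements are pushed in reverse so the first target is assigned first
 * as the stack is consumed. */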
1909static inline void
1910vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1911{
1912 int is_splat = flag & 0x01;
1913 const VALUE *ptr;
1914 rb_num_t len;
1915 const VALUE obj = ary;
1916
1917 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1918 ary = obj;
1919 ptr = &ary;
1920 len = 1;
1921 }
1922 else {
1923 ptr = RARRAY_CONST_PTR(ary);
1924 len = (rb_num_t)RARRAY_LEN(ary);
1925 }
1926
1927 if (num + is_splat == 0) {
1928 /* no space left on stack */
1929 }
1930 else if (flag & 0x02) {
1931 /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1932 rb_num_t i = 0, j;
1933
1934 if (len < num) {
1935 for (i = 0; i < num - len; i++) {
1936 *cfp->sp++ = Qnil;
1937 }
1938 }
1939
1940 for (j = 0; i < num; i++, j++) {
1941 VALUE v = ptr[len - j - 1];
1942 *cfp->sp++ = v;
1943 }
1944
1945 if (is_splat) {
1946 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1947 }
1948 }
1949 else {
1950 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1951 if (is_splat) {
1952 if (num > len) {
1953 *cfp->sp++ = rb_ary_new();
1954 }
1955 else {
1956 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1957 }
1958 }
1959
1960 if (num > len) {
1961 rb_num_t i = 0;
1962 for (; i < num - len; i++) {
1963 *cfp->sp++ = Qnil;
1964 }
1965
1966 for (rb_num_t j = 0; i < num; i++, j++) {
1967 *cfp->sp++ = ptr[len - j - 1];
1968 }
1969 }
1970 else {
1971 for (rb_num_t j = 0; j < num; j++) {
1972 *cfp->sp++ = ptr[num - j - 1];
1973 }
1974 }
1975 }
1976
1977 RB_GC_GUARD(ary);
1978}
1979
1980static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1981
1982static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1983
1984static struct rb_class_cc_entries *
1985vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1986{
1987 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1988#if VM_CHECK_MODE > 0
1989 ccs->debug_sig = ~(VALUE)ccs;
1990#endif
1991 ccs->capa = 0;
1992 ccs->len = 0;
1993 ccs->cme = cme;
1994 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
1995 ccs->entries = NULL;
1996
1997 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1998 RB_OBJ_WRITTEN(klass, Qundef, cme);
1999 return ccs;
2000}
2001
2002static void
2003vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2004{
2005 if (! vm_cc_markable(cc)) {
2006 return;
2007 }
2008
2009 if (UNLIKELY(ccs->len == ccs->capa)) {
2010 if (ccs->capa == 0) {
2011 ccs->capa = 1;
2012 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2013 }
2014 else {
2015 ccs->capa *= 2;
2016 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2017 }
2018 }
2019 VM_ASSERT(ccs->len < ccs->capa);
2020
2021 const int pos = ccs->len++;
2022 ccs->entries[pos].argc = vm_ci_argc(ci);
2023 ccs->entries[pos].flag = vm_ci_flag(ci);
2024 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2025
2026 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2027 // for tuning
2028 // vm_mtbl_dump(klass, 0);
2029 }
2030}
2031
2032#if VM_CHECK_MODE > 0
2033void
2034rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2035{
2036 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2037 for (int i=0; i<ccs->len; i++) {
2038 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2039 ccs->entries[i].flag,
2040 ccs->entries[i].argc);
2041 rp(ccs->entries[i].cc);
2042 }
2043}
2044
2045static int
2046vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2047{
2048 VM_ASSERT(vm_ccs_p(ccs));
2049 VM_ASSERT(ccs->len <= ccs->capa);
2050
2051 for (int i=0; i<ccs->len; i++) {
2052 const struct rb_callcache *cc = ccs->entries[i].cc;
2053
2054 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2055 VM_ASSERT(vm_cc_class_check(cc, klass));
2056 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2057 VM_ASSERT(!vm_cc_super_p(cc));
2058 VM_ASSERT(!vm_cc_refinement_p(cc));
2059 }
2060 return TRUE;
2061}
2062#endif
2063
2064const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2065
2066static const struct rb_callcache *
2067vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2068{
2069 const ID mid = vm_ci_mid(ci);
2070 struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2071 struct rb_class_cc_entries *ccs = NULL;
2072 VALUE ccs_data;
2073
2074 if (cc_tbl) {
2075 // CCS data is keyed on method id, so we don't need the method id
2076 // for doing comparisons in the `for` loop below.
2077 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2078 ccs = (struct rb_class_cc_entries *)ccs_data;
2079 const int ccs_len = ccs->len;
2080
2081 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2082 rb_vm_ccs_free(ccs);
2083 rb_id_table_delete(cc_tbl, mid);
2084 ccs = NULL;
2085 }
2086 else {
2087 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2088
2089 // We already know the method id is correct because we had
2090 // to look up the ccs_data by method id. All we need to
2091 // compare is argc and flag
2092 unsigned int argc = vm_ci_argc(ci);
2093 unsigned int flag = vm_ci_flag(ci);
2094
2095 for (int i=0; i<ccs_len; i++) {
2096 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2097 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2098 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2099
2100 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2101
2102 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2103 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2104
2105 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2106 VM_ASSERT(ccs_cc->klass == klass);
2107 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2108
2109 return ccs_cc;
2110 }
2111 }
2112 }
2113 }
2114 }
2115 else {
2116 cc_tbl = rb_id_table_create(2);
2117 RCLASS_WRITE_CC_TBL(klass, cc_tbl);
2118 }
2119
2120 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2121
2122 const rb_callable_method_entry_t *cme;
2123
2124 if (ccs) {
2125 cme = ccs->cme;
2126 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2127
2128 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2129 }
2130 else {
2131 cme = rb_callable_method_entry(klass, mid);
2132 }
2133
2134 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2135
2136 if (cme == NULL) {
2137 // undef or not found: can't cache the information
2138 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2139 return &vm_empty_cc;
2140 }
2141
2142 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2143
2144 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2145
2146 if (ccs == NULL) {
2147 VM_ASSERT(cc_tbl != NULL);
2148
2149 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2150 // rb_callable_method_entry() prepares ccs.
2151 ccs = (struct rb_class_cc_entries *)ccs_data;
2152 }
2153 else {
2154 // TODO: required?
2155 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2156 }
2157 }
2158
2159 cme = rb_check_overloaded_cme(cme, ci);
2160
2161 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2162 vm_ccs_push(klass, ccs, ci, cc);
2163
2164 VM_ASSERT(vm_cc_cme(cc) != NULL);
2165 VM_ASSERT(cme->called_id == mid);
2166 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2167
2168 return cc;
2169}
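/*
 * Informal sketch of the lookup above: each class owns an id table
 * mapping method id -> rb_class_cc_entries (ccs), and within a ccs the
 * entries are distinguished only by the call site's argc and flags
 * (the method id is already fixed by the table key). So two call sites
 * targeting the same method, e.g.
 *
 *   obj.foo(1, 2)   # argc:2, plain
 *   obj.foo(*args)  # argc:1, VM_CALL_ARGS_SPLAT
 *
 * get separate call caches even though they resolve to the same cme.
 */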
2170
2171const struct rb_callcache *
2172rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2173{
2174 const struct rb_callcache *cc;
2175
2176 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2177
2178 RB_VM_LOCKING() {
2179 cc = vm_search_cc(klass, ci);
2180
2181 VM_ASSERT(cc);
2182 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2183 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2184 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2185 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2186 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2187 }
2188
2189 return cc;
2190}
2191
2192static const struct rb_callcache *
2193vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2194{
2195#if USE_DEBUG_COUNTER
2196 const struct rb_callcache *old_cc = cd->cc;
2197#endif
2198
2199 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2200
2201#if OPT_INLINE_METHOD_CACHE
2202 cd->cc = cc;
2203
2204 const struct rb_callcache *empty_cc = &vm_empty_cc;
2205 if (cd_owner && cc != empty_cc) {
2206 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2207 }
2208
2209#if USE_DEBUG_COUNTER
2210 if (!old_cc || old_cc == empty_cc) {
2211 // empty
2212 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2213 }
2214 else if (old_cc == cc) {
2215 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2216 }
2217 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2218 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2219 }
2220 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2221 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2222 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2223 }
2224 else {
2225 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2226 }
2227#endif
2228#endif // OPT_INLINE_METHOD_CACHE
2229
2230 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2231 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2232
2233 return cc;
2234}
2235
2236ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2237static const struct rb_callcache *
2238vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2239{
2240 const struct rb_callcache *cc = cd->cc;
2241
2242#if OPT_INLINE_METHOD_CACHE
2243 if (LIKELY(vm_cc_class_check(cc, klass))) {
2244 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2245 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2246 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2247 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2248 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2249 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2250
2251 return cc;
2252 }
2253 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2254 }
2255 else {
2256 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2257 }
2258#endif
2259
2260 return vm_search_method_slowpath0(cd_owner, cd, klass);
2261}
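/*
 * Fast-path summary: cd->cc is the per-call-site inline cache. A hit
 * needs (1) the cached class to match the receiver's class and (2) the
 * cached method entry to not have been invalidated (by method
 * (re)definition, Module#include, refinements, etc.). Either failure
 * falls back to vm_search_method_slowpath0, which consults the
 * per-class ccs table above and refills cd->cc.
 */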
2262
2263static const struct rb_callcache *
2264vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2265{
2266 VALUE klass = CLASS_OF(recv);
2267 VM_ASSERT(klass != Qfalse);
2268 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2269
2270 return vm_search_method_fastpath(cd_owner, cd, klass);
2271}
2272
2273#if __has_attribute(transparent_union)
2274typedef union {
2275 VALUE (*anyargs)(ANYARGS);
2276 VALUE (*f00)(VALUE);
2277 VALUE (*f01)(VALUE, VALUE);
2278 VALUE (*f02)(VALUE, VALUE, VALUE);
2279 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2280 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2281 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2282 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2283 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2284 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2285 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2286 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2287 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2288 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2289 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2290 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2291 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2292 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2293} __attribute__((__transparent_union__)) cfunc_type;
2294# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2295#else
2296typedef VALUE (*cfunc_type)(ANYARGS);
2297# define make_cfunc_type(f) (cfunc_type)(f)
2298#endif
2299
2300static inline int
2301check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2302{
2303 if (! me) {
2304 return false;
2305 }
2306 else {
2307 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2308 VM_ASSERT(callable_method_entry_p(me));
2309 VM_ASSERT(me->def);
2310 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2311 return false;
2312 }
2313 else {
2314#if __has_attribute(transparent_union)
2315 return me->def->body.cfunc.func == func.anyargs;
2316#else
2317 return me->def->body.cfunc.func == func;
2318#endif
2319 }
2320 }
2321}
2322
2323static inline int
2324check_method_basic_definition(const rb_callable_method_entry_t *me)
2325{
2326 return me && METHOD_ENTRY_BASIC(me);
2327}
2328
2329static inline int
2330vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2331{
2332 VM_ASSERT(iseq != NULL);
2333 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2334 return check_cfunc(vm_cc_cme(cc), func);
2335}
2336
2337#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2338#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2339
2340#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2341
2342static inline bool
2343FIXNUM_2_P(VALUE a, VALUE b)
2344{
2345 /* FIXNUM_P(a) && FIXNUM_P(b)
2346 * == ((a & 1) && (b & 1))
2347 * == a & b & 1 */
2348 SIGNED_VALUE x = a;
2349 SIGNED_VALUE y = b;
2350 SIGNED_VALUE z = x & y & 1;
2351 return z == 1;
2352}
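/* Worked example (assuming the usual tagging where Fixnum n is
 * represented as (n << 1) | 1): 3 -> 0b0111 and 4 -> 0b1001, so
 * a & b & 1 == 1 exactly when both low tag bits are set, i.e. both
 * operands are Fixnums, using a single AND chain and no branches. */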
2353
2354static inline bool
2355FLONUM_2_P(VALUE a, VALUE b)
2356{
2357#if USE_FLONUM
2358 /* FLONUM_P(a) && FLONUM_P(b)
2359 * == ((a & 3) == 2) && ((b & 3) == 2)
2360 * == !(((a ^ 2) | (b ^ 2)) & 3)
2361 */
2362 SIGNED_VALUE x = a;
2363 SIGNED_VALUE y = b;
2364 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2365 return !z;
2366#else
2367 return false;
2368#endif
2369}
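/* Worked example: a flonum carries the tag 0b10 in its low two bits,
 * so (x ^ 2) zeroes those bits only for a flonum. Consequently
 * ((x ^ 2) | (y ^ 2)) & 3 is 0 exactly when both operands are flonums,
 * mirroring the comment above. */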
2370
2371static VALUE
2372opt_equality_specialized(VALUE recv, VALUE obj)
2373{
2374 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2375 goto compare_by_identity;
2376 }
2377 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2378 goto compare_by_identity;
2379 }
2380 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2381 goto compare_by_identity;
2382 }
2383 else if (SPECIAL_CONST_P(recv)) {
2384 // other special constants: no specialization, fall through to Qundef
2385 }
2386 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2387 double a = RFLOAT_VALUE(recv);
2388 double b = RFLOAT_VALUE(obj);
2389
2390#if MSC_VERSION_BEFORE(1300)
2391 if (isnan(a)) {
2392 return Qfalse;
2393 }
2394 else if (isnan(b)) {
2395 return Qfalse;
2396 }
2397 else
2398#endif
2399 return RBOOL(a == b);
2400 }
2401 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2402 if (recv == obj) {
2403 return Qtrue;
2404 }
2405 else if (RB_TYPE_P(obj, T_STRING)) {
2406 return rb_str_eql_internal(obj, recv);
2407 }
2408 }
2409 return Qundef;
2410
2411 compare_by_identity:
2412 return RBOOL(recv == obj);
2413}
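/*
 * Example of the EQ_UNREDEFINED_P guards above: `1 == 1` is compared
 * by identity only while Integer#== keeps its basic definition.
 * Redefining it, e.g.
 *
 *   class Integer; def ==(other); true; end; end
 *
 * sets the corresponding redefined-op flag, the guard fails, Qundef is
 * returned, and the call goes through the generic dispatch path.
 */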
2414
2415static VALUE
2416opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2417{
2418 VM_ASSERT(cd_owner != NULL);
2419
2420 VALUE val = opt_equality_specialized(recv, obj);
2421 if (!UNDEF_P(val)) return val;
2422
2423 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2424 return Qundef;
2425 }
2426 else {
2427 return RBOOL(recv == obj);
2428 }
2429}
2430
2431#undef EQ_UNREDEFINED_P
2432
2433static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2434NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2435
2436static VALUE
2437opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2438{
2439 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2440
2441 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2442 return RBOOL(recv == obj);
2443 }
2444 else {
2445 return Qundef;
2446 }
2447}
2448
2449static VALUE
2450opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2451{
2452 VALUE val = opt_equality_specialized(recv, obj);
2453 if (!UNDEF_P(val)) {
2454 return val;
2455 }
2456 else {
2457 return opt_equality_by_mid_slowpath(recv, obj, mid);
2458 }
2459}
2460
2461VALUE
2462rb_equal_opt(VALUE obj1, VALUE obj2)
2463{
2464 return opt_equality_by_mid(obj1, obj2, idEq);
2465}
2466
2467VALUE
2468rb_eql_opt(VALUE obj1, VALUE obj2)
2469{
2470 return opt_equality_by_mid(obj1, obj2, idEqlP);
2471}
2472
2473extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2474extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2475
2476static VALUE
2477check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2478{
2479 switch (type) {
2480 case VM_CHECKMATCH_TYPE_WHEN:
2481 return pattern;
2482 case VM_CHECKMATCH_TYPE_RESCUE:
2483 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2484 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2485 }
2486 /* fall through */
2487 case VM_CHECKMATCH_TYPE_CASE: {
2488 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2489 }
2490 default:
2491 rb_bug("check_match: unreachable");
2492 }
2493}
2494
2495
2496#if MSC_VERSION_BEFORE(1300)
2497#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2498#else
2499#define CHECK_CMP_NAN(a, b) /* do nothing */
2500#endif
2501
2502static inline VALUE
2503double_cmp_lt(double a, double b)
2504{
2505 CHECK_CMP_NAN(a, b);
2506 return RBOOL(a < b);
2507}
2508
2509static inline VALUE
2510double_cmp_le(double a, double b)
2511{
2512 CHECK_CMP_NAN(a, b);
2513 return RBOOL(a <= b);
2514}
2515
2516static inline VALUE
2517double_cmp_gt(double a, double b)
2518{
2519 CHECK_CMP_NAN(a, b);
2520 return RBOOL(a > b);
2521}
2522
2523static inline VALUE
2524double_cmp_ge(double a, double b)
2525{
2526 CHECK_CMP_NAN(a, b);
2527 return RBOOL(a >= b);
2528}
2529
2530// Copied by vm_dump.c
2531static inline VALUE *
2532vm_base_ptr(const rb_control_frame_t *cfp)
2533{
2534 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2535
2536 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2537 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2538
2539 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2540 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2541 int params = ISEQ_BODY(cfp->iseq)->param.size;
2542
2543 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2544 bp += vm_ci_argc(ci);
2545 }
2546
2547 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2548 /* adjust `self' */
2549 bp += 1;
2550 }
2551#if VM_DEBUG_BP_CHECK
2552 if (bp != cfp->bp_check) {
2553 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2554 (long)(cfp->bp_check - GET_EC()->vm_stack),
2555 (long)(bp - GET_EC()->vm_stack));
2556 rb_bug("vm_base_ptr: unreachable");
2557 }
2558#endif
2559 return bp;
2560 }
2561 else {
2562 return NULL;
2563 }
2564}
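/*
 * Layout sketch for the computation above (stack grows upward):
 *
 *   prev_cfp->sp -> [ receiver ]   (method/bmethod frames; hence bp += 1)
 *                   [ locals... ]  local_table_size slots
 *                   [ env data ]   VM_ENV_DATA_SIZE slots
 *   bp ----------->                base of this frame's operand stack
 *
 * Forwardable iseqs additionally skip the caller's argc (read from the
 * stashed CI), since those arguments also sit below the base pointer.
 */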
2565
2566VALUE *
2567rb_vm_base_ptr(const rb_control_frame_t *cfp)
2568{
2569 return vm_base_ptr(cfp);
2570}
2571
2572/* method call processes with call_info */
2573
2574#include "vm_args.c"
2575
2576static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2577ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2578static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2579static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2580static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2581static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2582static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2583
2584static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2585
2586static VALUE
2587vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2588{
2589 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2590
2591 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2592}
2593
2594static VALUE
2595vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2596{
2597 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2598
2599 const struct rb_callcache *cc = calling->cc;
2600 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2601 int param = ISEQ_BODY(iseq)->param.size;
2602 int local = ISEQ_BODY(iseq)->local_table_size;
2603 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2604}
2605
2606bool
2607rb_simple_iseq_p(const rb_iseq_t *iseq)
2608{
2609 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2610 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2611 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2612 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2613 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2614 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2615 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2616 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2617}
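/* For example, `def m(a, b); end` is "simple" (mandatory positionals
 * only), while any of `def m(a = 1)`, `def m(*a)`, `def m(k:)`,
 * `def m(**nil)`, `def m(...)` or `def m(&b)` sets one of the flags
 * above and disqualifies the iseq from the simple setup path. */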
2618
2619bool
2620rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2621{
2622 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2623 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2624 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2627 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2628 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2629 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2630}
2631
2632bool
2633rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2634{
2635 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2636 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2637 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2638 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2639 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2640 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2641 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2642}
2643
2644#define ALLOW_HEAP_ARGV (-2)
2645#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2646
2647static inline bool
2648vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2649{
2650 vm_check_canary(GET_EC(), cfp->sp);
2651 bool ret = false;
2652
2653 if (!NIL_P(ary)) {
2654 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2655 long len = RARRAY_LEN(ary);
2656 int argc = calling->argc;
2657
2658 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2659 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2660 * a temporary array, instead of trying to keep arguments on the VM stack.
2661 */
2662 VALUE *argv = cfp->sp - argc;
2663 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2664 rb_ary_cat(argv_ary, argv, argc);
2665 rb_ary_cat(argv_ary, ptr, len);
2666 cfp->sp -= argc - 1;
2667 cfp->sp[-1] = argv_ary;
2668 calling->argc = 1;
2669 calling->heap_argv = argv_ary;
2670 RB_GC_GUARD(ary);
2671 }
2672 else {
2673 long i;
2674
2675 if (max_args >= 0 && len + argc > max_args) {
2676 /* If a maximum argument count is imposed, copy at most max_args
2677 * arguments. Used by vm_callee_setup_block_arg for non-lambda blocks,
2678 * where additional arguments are ignored.
2679 *
2680 * Also, copy up to one more argument than the maximum,
2681 * in case it is an empty keyword hash that will be removed.
2682 */
2683 calling->argc += len - (max_args - argc + 1);
2684 len = max_args - argc + 1;
2685 ret = true;
2686 }
2687 else {
2688 /* Unset heap_argv if it was set earlier. This can happen when
2689 * forwarding modified arguments: heap_argv was used for the
2690 * original call, but the forwarded method does not support
2691 * heap_argv in all cases.
2692 */
2693 calling->heap_argv = 0;
2694 }
2695 CHECK_VM_STACK_OVERFLOW(cfp, len);
2696
2697 for (i = 0; i < len; i++) {
2698 *cfp->sp++ = ptr[i];
2699 }
2700 calling->argc += i;
2701 }
2702 }
2703
2704 return ret;
2705}
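/*
 * Illustration of the heap_argv path: `f(*a)` with a very large array
 * would otherwise copy every element onto the VM stack. When the
 * caller allows it (max_args <= ALLOW_HEAP_ARGV) and the total exceeds
 * VM_ARGC_STACK_MAX, the elements are gathered into one hidden Array
 * passed as a single stack slot, trading stack growth for one heap
 * allocation.
 */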
2706
2707static inline void
2708vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2709{
2710 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2711 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2712 const VALUE h = rb_hash_new_with_size(kw_len);
2713 VALUE *sp = cfp->sp;
2714 int i;
2715
2716 for (i=0; i<kw_len; i++) {
2717 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2718 }
2719 (sp-kw_len)[0] = h;
2720
2721 cfp->sp -= kw_len - 1;
2722 calling->argc -= kw_len - 1;
2723 calling->kw_splat = 1;
2724}
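/*
 * Sketch of the rewrite above for `f(k1: 1, k2: 2)` (kw_len == 2):
 *
 *   before: [ ... | 1 | 2 ] <- sp             argc == n
 *   after:  [ ... | {k1: 1, k2: 2} ] <- sp    argc == n - 1, kw_splat = 1
 *
 * i.e. the kw_len loose values are collapsed into a single hash
 * argument.
 */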
2725
2726static inline VALUE
2727vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2728{
2729 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2730 if (keyword_hash != Qnil) {
2731 /* Convert a non-hash keyword splat to a new hash */
2732 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2733 }
2734 }
2735 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2736 /* Convert a hash keyword splat to a new hash unless
2737 * a mutable keyword splat was passed.
2738 * Skip allocating a new hash for an empty keyword splat, as an
2739 * empty keyword splat will be ignored by both callers.
2740 */
2741 keyword_hash = rb_hash_dup(keyword_hash);
2742 }
2743 return keyword_hash;
2744}
2745
2746static inline void
2747CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2748 struct rb_calling_info *restrict calling,
2749 const struct rb_callinfo *restrict ci, int max_args)
2750{
2751 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2752 if (IS_ARGS_KW_SPLAT(ci)) {
2753 // f(*a, **kw)
2754 VM_ASSERT(calling->kw_splat == 1);
2755
2756 cfp->sp -= 2;
2757 calling->argc -= 2;
2758 VALUE ary = cfp->sp[0];
2759 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2760
2761 // splat a
2762 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2763
2764 // put kw
2765 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2766 if (UNLIKELY(calling->heap_argv)) {
2767 rb_ary_push(calling->heap_argv, kwh);
2768 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2769 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2770 calling->kw_splat = 0;
2771 }
2772 }
2773 else {
2774 cfp->sp[0] = kwh;
2775 cfp->sp++;
2776 calling->argc++;
2777
2778 VM_ASSERT(calling->kw_splat == 1);
2779 }
2780 }
2781 else {
2782 calling->kw_splat = 0;
2783 }
2784 }
2785 else {
2786 // f(*a)
2787 VM_ASSERT(calling->kw_splat == 0);
2788
2789 cfp->sp -= 1;
2790 calling->argc -= 1;
2791 VALUE ary = cfp->sp[0];
2792
2793 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2794 goto check_keyword;
2795 }
2796
2797 // check the last argument
2798 VALUE last_hash, argv_ary;
2799 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2800 if (!IS_ARGS_KEYWORD(ci) &&
2801 RARRAY_LEN(argv_ary) > 0 &&
2802 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2803 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2804
2805 rb_ary_pop(argv_ary);
2806 if (!RHASH_EMPTY_P(last_hash)) {
2807 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2808 calling->kw_splat = 1;
2809 }
2810 }
2811 }
2812 else {
2813 check_keyword:
2814 if (!IS_ARGS_KEYWORD(ci) &&
2815 calling->argc > 0 &&
2816 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2817 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2818
2819 if (RHASH_EMPTY_P(last_hash)) {
2820 calling->argc--;
2821 cfp->sp -= 1;
2822 }
2823 else {
2824 cfp->sp[-1] = rb_hash_dup(last_hash);
2825 calling->kw_splat = 1;
2826 }
2827 }
2828 }
2829 }
2830 }
2831 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2832 // f(**kw)
2833 VM_ASSERT(calling->kw_splat == 1);
2834 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2835
2836 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2837 cfp->sp--;
2838 calling->argc--;
2839 calling->kw_splat = 0;
2840 }
2841 else {
2842 cfp->sp[-1] = kwh;
2843 }
2844 }
2845 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2846 // f(k1:1, k2:2)
2847 VM_ASSERT(calling->kw_splat == 0);
2848
2849 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2850 * by creating a keyword hash.
2851 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2852 */
2853 vm_caller_setup_arg_kw(cfp, calling, ci);
2854 }
2855}
2856
2857#define USE_OPT_HIST 0
2858
2859#if USE_OPT_HIST
2860#define OPT_HIST_MAX 64
2861static int opt_hist[OPT_HIST_MAX+1];
2862
2863__attribute__((destructor))
2864static void
2865opt_hist_show_results_at_exit(void)
2866{
2867 for (int i=0; i<OPT_HIST_MAX; i++) {
2868 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2869 }
2870}
2871#endif
2872
2873static VALUE
2874vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2875 struct rb_calling_info *calling)
2876{
2877 const struct rb_callcache *cc = calling->cc;
2878 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2879 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2880 const int opt = calling->argc - lead_num;
2881 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2882 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2883 const int param = ISEQ_BODY(iseq)->param.size;
2884 const int local = ISEQ_BODY(iseq)->local_table_size;
2885 const int delta = opt_num - opt;
2886
2887 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2888
2889#if USE_OPT_HIST
2890 if (opt_pc < OPT_HIST_MAX) {
2891 opt_hist[opt]++;
2892 }
2893 else {
2894 opt_hist[OPT_HIST_MAX]++;
2895 }
2896#endif
2897
2898 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2899}
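/*
 * opt_table example: for `def m(a, b = 1, c = 2)`, lead_num is 1 and
 * opt_num is 2. A call m(10, 20) gives opt = argc - lead_num = 1, so
 * execution starts at opt_table[1], the offset where c's default is
 * computed. param is shrunk by delta = opt_num - opt = 1 so that the
 * unsupplied slot is nil-initialized like a local and then filled by
 * the default-value bytecode.
 */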
2900
2901static VALUE
2902vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2903 struct rb_calling_info *calling)
2904{
2905 const struct rb_callcache *cc = calling->cc;
2906 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2907 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2908 const int opt = calling->argc - lead_num;
2909 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2910
2911 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2912
2913#if USE_OPT_HIST
2914 if (opt_pc < OPT_HIST_MAX) {
2915 opt_hist[opt]++;
2916 }
2917 else {
2918 opt_hist[OPT_HIST_MAX]++;
2919 }
2920#endif
2921
2922 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2923}
2924
2925static void
2926args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
2927 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2928 VALUE *const locals);
2929
2930static VALUE
2931vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2932 struct rb_calling_info *calling)
2933{
2934 const struct rb_callcache *cc = calling->cc;
2935 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2936 int param_size = ISEQ_BODY(iseq)->param.size;
2937 int local_size = ISEQ_BODY(iseq)->local_table_size;
2938
2939 // Setting up local size and param size
2940 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2941
2942 local_size = local_size + vm_ci_argc(calling->cd->ci);
2943 param_size = param_size + vm_ci_argc(calling->cd->ci);
2944
2945 cfp->sp[0] = (VALUE)calling->cd->ci;
2946
2947 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2948}
2949
2950static VALUE
2951vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2952 struct rb_calling_info *calling)
2953{
2954 const struct rb_callinfo *ci = calling->cd->ci;
2955 const struct rb_callcache *cc = calling->cc;
2956
2957 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2958 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2959
2960 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2961 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2962 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2963 const int ci_kw_len = kw_arg->keyword_len;
2964 const VALUE * const ci_keywords = kw_arg->keywords;
2965 VALUE *argv = cfp->sp - calling->argc;
2966 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2967 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2968 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2969 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2970 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
2971
2972 int param = ISEQ_BODY(iseq)->param.size;
2973 int local = ISEQ_BODY(iseq)->local_table_size;
2974 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2975}
2976
2977static VALUE
2978vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2979 struct rb_calling_info *calling)
2980{
2981 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2982 const struct rb_callcache *cc = calling->cc;
2983
2984 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2985 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2986
2987 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2988 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2989 VALUE * const argv = cfp->sp - calling->argc;
2990 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
2991
2992 int i;
2993 for (i=0; i<kw_param->num; i++) {
2994 klocals[i] = kw_param->default_values[i];
2995 }
2996 klocals[i] = INT2FIX(0); // kw specify flag
2997 // NOTE:
2998 // nobody checks this value, but it should be cleared because it can
2999 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3000
3001 int param = ISEQ_BODY(iseq)->param.size;
3002 int local = ISEQ_BODY(iseq)->local_table_size;
3003 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3004}
3005
3006static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3007
3008static VALUE
3009vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3010 struct rb_calling_info *calling)
3011{
3012 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3013 cfp->sp -= (calling->argc + 1);
3014 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3015 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3016}
3017
3018VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3019
3020static void
3021warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3022{
3023 rb_vm_t *vm = GET_VM();
3024 set_table *dup_check_table = vm->unused_block_warning_table;
3025 st_data_t key;
3026 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3027
3028 union {
3029 VALUE v;
3030 unsigned char b[SIZEOF_VALUE];
3031 } k1 = {
3032 .v = (VALUE)pc,
3033 }, k2 = {
3034 .v = (VALUE)cme->def,
3035 };
3036
3037 // relax check
3038 if (!strict_unused_block) {
3039 key = (st_data_t)cme->def->original_id;
3040
3041 if (set_table_lookup(dup_check_table, key)) {
3042 return;
3043 }
3044 }
3045
3046 // strict check
3047 // make unique key from pc and me->def pointer
3048 key = 0;
3049 for (int i=0; i<SIZEOF_VALUE; i++) {
3050 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3051 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3052 }
3053
3054 if (0) {
3055 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3056 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3057 fprintf(stderr, "key:%p\n", (void *)key);
3058 }
3059
3060 // duplication check
3061 if (set_insert(dup_check_table, key)) {
3062 // already shown
3063 }
3064 else if (RTEST(ruby_verbose) || strict_unused_block) {
3065 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3066 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3067
3068 if (!NIL_P(m_loc)) {
3069 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3070 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3071 }
3072 else {
3073 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3074 }
3075 }
3076}
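/*
 * Key-mixing note: pc and cme->def are both pointers and tend to agree
 * in their high-order bytes, so XOR-ing one byte-reversed against the
 * other (k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) combines the high-entropy
 * low bytes of each at different key positions, keeping accidental
 * collisions in the dedup table unlikely.
 */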
3077
3078static inline int
3079vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3080 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3081{
3082 const struct rb_callinfo *ci = calling->cd->ci;
3083 const struct rb_callcache *cc = calling->cc;
3084
3085 VM_ASSERT((vm_ci_argc(ci), 1));
3086 VM_ASSERT(vm_cc_cme(cc) != NULL);
3087
3088 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3089 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3090 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3091 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3092 }
3093
3094 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3095 if (LIKELY(rb_simple_iseq_p(iseq))) {
3096 rb_control_frame_t *cfp = ec->cfp;
3097 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3098 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3099
3100 if (calling->argc != lead_num) {
3101 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3102 }
3103
3104 //VM_ASSERT(ci == calling->cd->ci);
3105 VM_ASSERT(cc == calling->cc);
3106
3107 if (vm_call_iseq_optimizable_p(ci, cc)) {
3108 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3109 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3110 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3111 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3112 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3113 }
3114 else {
3115 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3116 }
3117 }
3118 return 0;
3119 }
3120 else if (rb_iseq_only_optparam_p(iseq)) {
3121 rb_control_frame_t *cfp = ec->cfp;
3122
3123 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3124 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3125
3126 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3127 const int argc = calling->argc;
3128 const int opt = argc - lead_num;
3129
3130 if (opt < 0 || opt > opt_num) {
3131 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3132 }
3133
3134 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3135 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3136 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3137 vm_call_cacheable(ci, cc));
3138 }
3139 else {
3140 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3141 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3142 vm_call_cacheable(ci, cc));
3143 }
3144
3145 /* initialize opt vars for self-references */
3146 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3147 for (int i=argc; i<lead_num + opt_num; i++) {
3148 argv[i] = Qnil;
3149 }
3150 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3151 }
3152 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3153 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3154 const int argc = calling->argc;
3155 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3156
3157 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3158 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3159
3160 if (argc - kw_arg->keyword_len == lead_num) {
3161 const int ci_kw_len = kw_arg->keyword_len;
3162 const VALUE * const ci_keywords = kw_arg->keywords;
3163 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3164 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3165
3166 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3167 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3168
3169 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3170 vm_call_cacheable(ci, cc));
3171
3172 return 0;
3173 }
3174 }
3175 else if (argc == lead_num) {
3176 /* no kwarg */
3177 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3178 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3179
3180 if (klocals[kw_param->num] == INT2FIX(0)) {
3181 /* copy from default_values */
3182 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3183 vm_call_cacheable(ci, cc));
3184 }
3185
3186 return 0;
3187 }
3188 }
3189 }
3190
3191 // Called iseq is using ... param
3192 // def foo(...) # <- iseq for foo will have "forwardable"
3193 //
3194 // We want to set the `...` local to the caller's CI
3195 // foo(1, 2) # <- the ci for this should end up as `...`
3196 //
3197 // So hopefully the stack looks like:
3198 //
3199 // => 1
3200 // => 2
3201 // => *
3202 // => **
3203 // => &
3204 // => ... # <- points at `foo`s CI
3205 // => cref_or_me
3206 // => specval
3207 // => type
3208 //
3209 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3210 bool can_fastpath = true;
3211
3212 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3213 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3214 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3215 ci = vm_ci_new_runtime(
3216 vm_ci_mid(ci),
3217 vm_ci_flag(ci),
3218 vm_ci_argc(ci),
3219 vm_ci_kwarg(ci));
3220 }
3221 else {
3222 ci = forward_cd->caller_ci;
3223 }
3224 can_fastpath = false;
3225 }
3226 // C functions calling iseqs will stack-allocate a CI,
3227 // so we need to convert it to a heap-allocated one.
3228 if (!vm_ci_markable(ci)) {
3229 ci = vm_ci_new_runtime(
3230 vm_ci_mid(ci),
3231 vm_ci_flag(ci),
3232 vm_ci_argc(ci),
3233 vm_ci_kwarg(ci));
3234 can_fastpath = false;
3235 }
3236 argv[param_size - 1] = (VALUE)ci;
3237 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3238 return 0;
3239 }
3240
3241 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3242}
3243
3244static void
3245vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3246{
3247 // This case is when the caller is using a ... parameter,
3248 // for example `bar(...)`. The call info will have VM_CALL_FORWARDING
3249 // set. In this case the caller's caller's CI will be on the stack.
3250 //
3251 // For example:
3252 //
3253 // def bar(a, b); a + b; end
3254 // def foo(...); bar(...); end
3255 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3256 //
3257 // Stack layout will be:
3258 //
3259 // > 1
3260 // > 2
3261 // > CI for foo(1, 2)
3262 // > cref_or_me
3263 // > specval
3264 // > type
3265 // > receiver
3266 // > CI for foo(1, 2), via `getlocal ...`
3267 // > ( SP points here )
3268 const VALUE * lep = VM_CF_LEP(cfp);
3269
3270 const rb_iseq_t *iseq;
3271
3272 // If we're in an escaped environment (lambda for example), get the iseq
3273 // from the captured env.
3274 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3275 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3276 iseq = env->iseq;
3277 }
3278 else { // Otherwise use the lep to find the caller
3279 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3280 }
3281
3282 // Our local storage is below the args we need to copy
3283 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3284
3285 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3286 VALUE * to = cfp->sp - 1; // clobber the CI
3287
3288 if (RTEST(splat)) {
3289 to -= 1; // clobber the splat array
3290 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3291 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3292 to += RARRAY_LEN(splat);
3293 }
3294
3295 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3296 MEMCPY(to, from, VALUE, argc);
3297 cfp->sp = to + argc;
3298
3299 // Stack layout should now be:
3300 //
3301 // > 1
3302 // > 2
3303 // > CI for foo(1, 2)
3304 // > cref_or_me
3305 // > specval
3306 // > type
3307 // > receiver
3308 // > 1
3309 // > 2
3310 // > ( SP points here )
3311}
3312
3313static VALUE
3314vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3315{
3316 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3317
3318 const struct rb_callcache *cc = calling->cc;
3319 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3320 int param_size = ISEQ_BODY(iseq)->param.size;
3321 int local_size = ISEQ_BODY(iseq)->local_table_size;
3322
3323 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3324
3325 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3326 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3327}
3328
3329static VALUE
3330vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3331{
3332 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3333
3334 const struct rb_callcache *cc = calling->cc;
3335 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3336 int param_size = ISEQ_BODY(iseq)->param.size;
3337 int local_size = ISEQ_BODY(iseq)->local_table_size;
3338
3339 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3340
3341 // Setting up local size and param size
3342 local_size = local_size + vm_ci_argc(calling->cd->ci);
3343 param_size = param_size + vm_ci_argc(calling->cd->ci);
3344
3345 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3346 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3347}
3348
3349static inline VALUE
3350vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3351 int opt_pc, int param_size, int local_size)
3352{
3353 const struct rb_callinfo *ci = calling->cd->ci;
3354 const struct rb_callcache *cc = calling->cc;
3355
3356 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3357 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3358 }
3359 else {
3360 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3361 }
3362}
3363
3364static inline VALUE
3365vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3366 int opt_pc, int param_size, int local_size)
3367{
3368 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3369 VALUE *argv = cfp->sp - calling->argc;
3370 VALUE *sp = argv + param_size;
3371 cfp->sp = argv - 1 /* recv */;
3372
3373 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3374 calling->block_handler, (VALUE)me,
3375 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3376 local_size - param_size,
3377 ISEQ_BODY(iseq)->stack_max);
3378 return Qundef;
3379}
3380
3381static inline VALUE
3382vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3383{
3384 const struct rb_callcache *cc = calling->cc;
3385 unsigned int i;
3386 VALUE *argv = cfp->sp - calling->argc;
3387 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3388 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3389 VALUE *src_argv = argv;
3390 VALUE *sp_orig, *sp;
3391 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3392
3393 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3394 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3395 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3396 dst_captured->code.val = src_captured->code.val;
3397 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3398 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3399 }
3400 else {
3401 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3402 }
3403 }
3404
3405 vm_pop_frame(ec, cfp, cfp->ep);
3406 cfp = ec->cfp;
3407
3408 sp_orig = sp = cfp->sp;
3409
3410 /* push self */
3411 sp[0] = calling->recv;
3412 sp++;
3413
3414 /* copy arguments */
3415 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3416 *sp++ = src_argv[i];
3417 }
3418
3419 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3420 calling->recv, calling->block_handler, (VALUE)me,
3421 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3422 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3423 ISEQ_BODY(iseq)->stack_max);
3424
3425 cfp->sp = sp_orig;
3426
3427 return Qundef;
3428}
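/*
 * Tailcall sketch: unlike the normal path, the caller's frame is
 * popped *before* the callee's frame is pushed, so (with the tailcall
 * compile option enabled)
 *
 *   def fact(n, acc = 1)
 *     n <= 1 ? acc : fact(n - 1, n * acc)
 *   end
 *
 * runs in a bounded number of control frames instead of one per
 * recursion, at the cost of losing intermediate frames from
 * backtraces.
 */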
3429
3430static void
3431ractor_unsafe_check(void)
3432{
3433 if (!rb_ractor_main_p()) {
3434 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3435 }
3436}
3437
3438static VALUE
3439call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3440{
3441 ractor_unsafe_check();
3442 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3443 return (*f)(recv, rb_ary_new4(argc, argv));
3444}
3445
3446static VALUE
3447call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3448{
3449 ractor_unsafe_check();
3450 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3451 return (*f)(argc, argv, recv);
3452}
3453
3454static VALUE
3455call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3456{
3457 ractor_unsafe_check();
3458 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3459 return (*f)(recv);
3460}
3461
3462static VALUE
3463call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3464{
3465 ractor_unsafe_check();
3466 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3467 return (*f)(recv, argv[0]);
3468}
3469
3470static VALUE
3471call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3472{
3473 ractor_unsafe_check();
3474 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3475 return (*f)(recv, argv[0], argv[1]);
3476}
3477
3478static VALUE
3479call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3480{
3481 ractor_unsafe_check();
3482 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3483 return (*f)(recv, argv[0], argv[1], argv[2]);
3484}
3485
3486static VALUE
3487call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3488{
3489 ractor_unsafe_check();
3490 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3491 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3492}
3493
3494static VALUE
3495call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3496{
3497 ractor_unsafe_check();
3498 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3499 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3500}
3501
3502static VALUE
3503call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3504{
3505 ractor_unsafe_check();
3506 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3507 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3508}
3509
3510static VALUE
3511call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3512{
3513 ractor_unsafe_check();
3514 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3515 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3516}
3517
3518static VALUE
3519call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3520{
3521 ractor_unsafe_check();
3522 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3523 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3524}
3525
3526static VALUE
3527call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3528{
3529 ractor_unsafe_check();
3530 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3531 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3532}
3533
3534static VALUE
3535call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3536{
3537 ractor_unsafe_check();
3538 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3539 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3540}
3541
3542static VALUE
3543call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3544{
3545 ractor_unsafe_check();
3546 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3547 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3548}
3549
3550static VALUE
3551call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3552{
3553 ractor_unsafe_check();
3554 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3555 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3556}
3557
3558static VALUE
3559call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3560{
3561 ractor_unsafe_check();
3562 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3563 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3564}
3565
3566static VALUE
3567call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3568{
3569 ractor_unsafe_check();
3570 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3571 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3572}
3573
3574static VALUE
3575call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3576{
3577 ractor_unsafe_check();
3578 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3579 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3580}
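/*
 * Dispatch note: a C method's definition-time arity selects one of the
 * trampolines above as cfunc->invoker, so a method registered with
 * rb_define_method(klass, "m", func, 2) is always entered through
 * call_cfunc_2 with no runtime switch on argc. Arity -1 maps to
 * call_cfunc_m1 (argc/argv style) and -2 to call_cfunc_m2 (arguments
 * collected into one Array).
 */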
3581
3582static VALUE
3583ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3584{
3585 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3586 return (*f)(recv, rb_ary_new4(argc, argv));
3587}
3588
3589static VALUE
3590ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3591{
3592 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3593 return (*f)(argc, argv, recv);
3594}
3595
3596static VALUE
3597ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3598{
3599 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3600 return (*f)(recv);
3601}
3602
3603static VALUE
3604ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3605{
3606 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3607 return (*f)(recv, argv[0]);
3608}
3609
3610static VALUE
3611ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3612{
3613 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3614 return (*f)(recv, argv[0], argv[1]);
3615}
3616
3617static VALUE
3618ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3619{
3620 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3621 return (*f)(recv, argv[0], argv[1], argv[2]);
3622}
3623
3624static VALUE
3625ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3626{
3627 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3628 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3629}
3630
3631static VALUE
3632ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3633{
3634 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3635 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3636}
3637
3638static VALUE
3639ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3640{
3642 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3643}
3644
3645static VALUE
3646ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3647{
3649 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3650}
3651
3652static VALUE
3653ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3654{
3656 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3657}
3658
3659static VALUE
3660ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3661{
3663 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3664}
3665
3666static VALUE
3667ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3668{
3670 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3671}
3672
3673static VALUE
3674ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3675{
3677 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3678}
3679
3680static VALUE
3681ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3682{
3684 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3685}
3686
3687static VALUE
3688ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3689{
3691 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3692}
3693
3694static VALUE
3695ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3696{
3698 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3699}
3700
3701static VALUE
3702ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3703{
3705 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3706}
3707
3708static inline int
3709vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3710{
3711 const int ov_flags = RAISED_STACKOVERFLOW;
3712 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3713 if (rb_ec_raised_p(ec, ov_flags)) {
3714 rb_ec_raised_reset(ec, ov_flags);
3715 return TRUE;
3716 }
3717 return FALSE;
3718}
3719
3720#define CHECK_CFP_CONSISTENCY(func) \
3721 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3722 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3723
3724static inline
3725const rb_method_cfunc_t *
3726vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3727{
3728#if VM_DEBUG_VERIFY_METHOD_CACHE
3729 switch (me->def->type) {
3730 case VM_METHOD_TYPE_CFUNC:
3731 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3732 break;
3733# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3734 METHOD_BUG(ISEQ);
3735 METHOD_BUG(ATTRSET);
3736 METHOD_BUG(IVAR);
3737 METHOD_BUG(BMETHOD);
3738 METHOD_BUG(ZSUPER);
3739 METHOD_BUG(UNDEF);
3740 METHOD_BUG(OPTIMIZED);
3741 METHOD_BUG(MISSING);
3742 METHOD_BUG(REFINED);
3743 METHOD_BUG(ALIAS);
3744# undef METHOD_BUG
3745 default:
3746 rb_bug("wrong method type: %d", me->def->type);
3747 }
3748#endif
3749 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3750}
3751
3752static VALUE
3753vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3754 int argc, VALUE *argv, VALUE *stack_bottom)
3755{
3756 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3757 const struct rb_callinfo *ci = calling->cd->ci;
3758 const struct rb_callcache *cc = calling->cc;
3759 VALUE val;
3760 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3761 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3762
3763 VALUE recv = calling->recv;
3764 VALUE block_handler = calling->block_handler;
3765 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3766
3767 if (UNLIKELY(calling->kw_splat)) {
3768 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3769 }
3770
3771 VM_ASSERT(reg_cfp == ec->cfp);
3772
3773 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3774 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3775
3776 vm_push_frame(ec, NULL, frame_type, recv,
3777 block_handler, (VALUE)me,
3778 0, ec->cfp->sp, 0, 0);
3779
3780 int len = cfunc->argc;
3781 if (len >= 0) rb_check_arity(argc, len, len);
3782
3783 reg_cfp->sp = stack_bottom;
3784 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3785
3786 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3787
3788 rb_vm_pop_frame(ec);
3789
3790 VM_ASSERT(ec->cfp->sp == stack_bottom);
3791
3792 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3793 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3794
3795 return val;
3796}
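
// Illustrative example (sketch): for a simple C-method call such as
//
//   "abc".bytesize
//
// this helper pushes a VM_FRAME_MAGIC_CFUNC frame, invokes the C function
// through cfunc->invoker, verifies with CHECK_CFP_CONSISTENCY that the
// callee left exactly this frame on top, pops the frame, and returns the
// result value.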
3797
3798// Push a C method frame for a given cme. This is called when JIT code skipped
3799// pushing a frame but the C method reached a point where a frame is needed.
3800void
3801rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3802{
3803 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3804 rb_execution_context_t *ec = GET_EC();
3805 VALUE *sp = ec->cfp->sp;
3806 VALUE recv = *(sp - recv_idx - 1);
3807 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3808 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3809#if VM_CHECK_MODE > 0
3810 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3811 *(GET_EC()->cfp->sp) = Qfalse;
3812#endif
3813 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3814}
3815
3816// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3817bool
3818rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3819{
3820 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3821}
3822
3823static VALUE
3824vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3825{
3826 int argc = calling->argc;
3827 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3828 VALUE *argv = &stack_bottom[1];
3829
3830 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3831}
3832
3833static VALUE
3834vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3835{
3836 const struct rb_callinfo *ci = calling->cd->ci;
3837 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3838
3839 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3840 VALUE argv_ary;
3841 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3842 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3843 int argc = RARRAY_LENINT(argv_ary);
3844 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3845 VALUE *stack_bottom = reg_cfp->sp - 2;
3846
3847 VM_ASSERT(calling->argc == 1);
3848 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3849 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3850
3851 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3852 }
3853 else {
3854 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3855
3856 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3857 }
3858}
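
// Rough picture (sketch): this is the general cfunc path. When the caller's
// arguments were spilled to a hidden Array (calling->heap_argv), e.g. for a
// large splat like
//
//   f(*a)   # with a.size > VM_ARGC_STACK_MAX
//
// argv points into that Array; otherwise the arguments stay on the VM stack
// and the call cache is upgraded to the vm_call_cfunc_with_frame fastpath
// for subsequent calls.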
3859
3860static inline VALUE
3861vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3862{
3863 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3864 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3865
3866 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3867 return vm_call_cfunc_other(ec, reg_cfp, calling);
3868 }
3869
3870 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3871 calling->kw_splat = 0;
3872 int i;
3873 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3874 VALUE *sp = stack_bottom;
3875 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3876 for (i = 0; i < argc; i++) {
3877 *++sp = argv[i];
3878 }
3879 reg_cfp->sp = sp+1;
3880
3881 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3882}
3883
3884static inline VALUE
3885vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3886{
3887 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3888 VALUE argv_ary = reg_cfp->sp[-1];
3889 int argc = RARRAY_LENINT(argv_ary);
3890 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3891 VALUE last_hash;
3892 int argc_offset = 0;
3893
3894 if (UNLIKELY(argc > 0 &&
3895 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3896 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3897 if (!RHASH_EMPTY_P(last_hash)) {
3898 return vm_call_cfunc_other(ec, reg_cfp, calling);
3899 }
3900 argc_offset++;
3901 }
3902 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3903}
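
// Example (sketch): `f(*a)` where the last element of `a` is an *empty*
// ruby2_keywords-flagged Hash can still take the array-argv fastpath; the
// empty hash is dropped via argc_offset. A non-empty flagged hash falls
// back to vm_call_cfunc_other so keywords are processed correctly.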
3904
3905static inline VALUE
3906vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3907{
3908 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3909 VALUE keyword_hash = reg_cfp->sp[-1];
3910
3911 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3912 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3913 }
3914
3915 return vm_call_cfunc_other(ec, reg_cfp, calling);
3916}
3917
3918static VALUE
3919vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3920{
3921 const struct rb_callinfo *ci = calling->cd->ci;
3922 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3923
3924 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3925 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3926 // f(*a)
3927 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3928 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3929 }
3930 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3931 // f(*a, **kw)
3932 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3933 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3934 }
3935 }
3936
3937 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3938 return vm_call_cfunc_other(ec, reg_cfp, calling);
3939}
3940
3941static VALUE
3942vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3943{
3944 const struct rb_callcache *cc = calling->cc;
3945 RB_DEBUG_COUNTER_INC(ccf_ivar);
3946 cfp->sp -= 1;
3947 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3948 return ivar;
3949}
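
// Example (sketch): for `attr_reader :name`, a call like `obj.name` is
// dispatched here and reduces to a shape-cached instance variable read of
// @name via vm_getivar, without pushing a method frame.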
3950
3951static VALUE
3952vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3953{
3954 RB_DEBUG_COUNTER_INC(ccf_attrset);
3955 VALUE val = *(cfp->sp - 1);
3956 cfp->sp -= 2;
3957 attr_index_t index;
3958 shape_id_t dest_shape_id;
3959 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
3960 ID id = vm_cc_cme(cc)->def->body.attr.id;
3961 rb_check_frozen(obj);
3962 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3963 if (UNDEF_P(res)) {
3964 switch (BUILTIN_TYPE(obj)) {
3965 case T_OBJECT:
3966 case T_CLASS:
3967 case T_MODULE:
3968 break;
3969 default:
3970 {
3971 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3972 if (!UNDEF_P(res)) {
3973 return res;
3974 }
3975 }
3976 }
3977 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3978 }
3979 return res;
3980}
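
// Example (sketch): for `attr_writer :name`, `obj.name = v` lands here.
// vm_setivar performs the shape-cached write; generic objects fall back to
// vm_setivar_default, and vm_setivar_slowpath_attr handles the remaining
// cases (e.g. a cache miss on the shape/index).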
3981
3982static VALUE
3983vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3984{
3985 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3986}
3987
3988static inline VALUE
3989vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3990{
3991 rb_proc_t *proc;
3992 VALUE val;
3993 const struct rb_callcache *cc = calling->cc;
3994 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3995 VALUE procv = cme->def->body.bmethod.proc;
3996
3997 if (!RB_OBJ_SHAREABLE_P(procv) &&
3998 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
3999 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4000 }
4001
4002 /* control block frame */
4003 GetProcPtr(procv, proc);
4004 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4005
4006 return val;
4007}
4008
4009static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4010
4011static VALUE
4012vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4013{
4014 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4015
4016 const struct rb_callcache *cc = calling->cc;
4017 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4018 VALUE procv = cme->def->body.bmethod.proc;
4019
4020 if (!RB_OBJ_SHAREABLE_P(procv) &&
4021 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4022 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4023 }
4024
4025 rb_proc_t *proc;
4026 GetProcPtr(procv, proc);
4027 const struct rb_block *block = &proc->block;
4028
4029 while (vm_block_type(block) == block_type_proc) {
4030 block = vm_proc_block(block->as.proc);
4031 }
4032 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4033
4034 const struct rb_captured_block *captured = &block->as.captured;
4035 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4036 VALUE * const argv = cfp->sp - calling->argc;
4037 const int arg_size = ISEQ_BODY(iseq)->param.size;
4038
4039 int opt_pc;
4040 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4041 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4042 }
4043 else {
4044 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4045 }
4046
4047 cfp->sp = argv - 1; // -1 for the receiver
4048
4049 vm_push_frame(ec, iseq,
4050 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4051 calling->recv,
4052 VM_GUARDED_PREV_EP(captured->ep),
4053 (VALUE)cme,
4054 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4055 argv + arg_size,
4056 ISEQ_BODY(iseq)->local_table_size - arg_size,
4057 ISEQ_BODY(iseq)->stack_max);
4058
4059 return Qundef;
4060}
4061
4062static VALUE
4063vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4064{
4065 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4066
4067 VALUE *argv;
4068 int argc;
4069 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4070 if (UNLIKELY(calling->heap_argv)) {
4071 argv = RARRAY_PTR(calling->heap_argv);
4072 cfp->sp -= 2;
4073 }
4074 else {
4075 argc = calling->argc;
4076 argv = ALLOCA_N(VALUE, argc);
4077 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4078 cfp->sp += - argc - 1;
4079 }
4080
4081 return vm_call_bmethod_body(ec, calling, argv);
4082}
4083
4084static VALUE
4085vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4086{
4087 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4088
4089 const struct rb_callcache *cc = calling->cc;
4090 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4091 VALUE procv = cme->def->body.bmethod.proc;
4092 rb_proc_t *proc;
4093 GetProcPtr(procv, proc);
4094 const struct rb_block *block = &proc->block;
4095
4096 while (vm_block_type(block) == block_type_proc) {
4097 block = vm_proc_block(block->as.proc);
4098 }
4099 if (vm_block_type(block) == block_type_iseq) {
4100 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4101 return vm_call_iseq_bmethod(ec, cfp, calling);
4102 }
4103
4104 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4105 return vm_call_noniseq_bmethod(ec, cfp, calling);
4106}
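
// Example (sketch): `define_method(:double) { |x| x * 2 }` defines a
// bmethod wrapping the block's Proc. A call like `obj.double(21)` arrives
// here, and because the underlying block is an iseq block, it is
// re-dispatched to vm_call_iseq_bmethod (an ifunc-based Proc would take
// the noniseq path instead).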
4107
4108VALUE
4109rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4110{
4111 VALUE klass = current_class;
4112
 4113 /* for a prepended Module, start the search from the covering class */
4114 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4115 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4116 klass = RBASIC_CLASS(klass);
4117 }
4118
4119 while (RTEST(klass)) {
4120 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4121 if (owner == target_owner) {
4122 return klass;
4123 }
4124 klass = RCLASS_SUPER(klass);
4125 }
4126
4127 return current_class; /* maybe module function */
4128}
4129
4130static const rb_callable_method_entry_t *
4131aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4132{
4133 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4134 const rb_callable_method_entry_t *cme;
4135
4136 if (orig_me->defined_class == 0) {
4137 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4138 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4139 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4140
4141 if (me->def->reference_count == 1) {
4142 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4143 }
4144 else {
 4145 rb_method_definition_t *def =
 4146 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4147 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4148 }
4149 }
4150 else {
4151 cme = (const rb_callable_method_entry_t *)orig_me;
4152 }
4153
4154 VM_ASSERT(callable_method_entry_p(cme));
4155 return cme;
4156}
4157
4158const rb_callable_method_entry_t *
4159rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4160{
4161 return aliased_callable_method_entry(me);
4162}
4163
4164static VALUE
4165vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4166{
4167 calling->cc = &VM_CC_ON_STACK(Qundef,
4168 vm_call_general,
4169 {{0}},
4170 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4171
4172 return vm_call_method_each_type(ec, cfp, calling);
4173}
4174
4175static enum method_missing_reason
4176ci_missing_reason(const struct rb_callinfo *ci)
4177{
4178 enum method_missing_reason stat = MISSING_NOENTRY;
4179 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4180 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4181 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4182 return stat;
4183}
4184
4185static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4186
4187static VALUE
4188vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4189 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4190{
4191 ASSUME(calling->argc >= 0);
4192
4193 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4194 int argc = calling->argc;
4195 VALUE recv = calling->recv;
4196 VALUE klass = CLASS_OF(recv);
4197 ID mid = rb_check_id(&symbol);
4198 flags |= VM_CALL_OPT_SEND;
4199
4200 if (UNLIKELY(! mid)) {
4201 mid = idMethodMissing;
4202 missing_reason = ci_missing_reason(ci);
4203 ec->method_missing_reason = missing_reason;
4204
4205 VALUE argv_ary;
4206 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4207 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4208 rb_ary_unshift(argv_ary, symbol);
4209
4210 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4211 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4212 VALUE exc = rb_make_no_method_exception(
4213 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4214
4215 rb_exc_raise(exc);
4216 }
4217 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4218 }
4219 else {
4220 /* E.g. when argc == 2
4221 *
4222 * | | | | TOPN
4223 * | | +------+
4224 * | | +---> | arg1 | 0
4225 * +------+ | +------+
4226 * | arg1 | -+ +-> | arg0 | 1
4227 * +------+ | +------+
4228 * | arg0 | ---+ | sym | 2
4229 * +------+ +------+
4230 * | recv | | recv | 3
4231 * --+------+--------+------+------
4232 */
4233 int i = argc;
4234 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4235 INC_SP(1);
4236 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4237 argc = ++calling->argc;
4238
4239 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4240 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4241 TOPN(i) = symbol;
4242 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4243 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4244 VALUE exc = rb_make_no_method_exception(
4245 rb_eNoMethodError, 0, recv, argc, argv, priv);
4246
4247 rb_exc_raise(exc);
4248 }
4249 else {
4250 TOPN(i) = rb_str_intern(symbol);
4251 }
4252 }
4253 }
4254
4255 struct rb_forwarding_call_data new_fcd = {
4256 .cd = {
4257 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4258 .cc = NULL,
4259 },
4260 .caller_ci = NULL,
4261 };
4262
4263 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4264 calling->cd = &new_fcd.cd;
4265 }
4266 else {
4267 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4268 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4269 new_fcd.caller_ci = caller_ci;
4270 calling->cd = (struct rb_call_data *)&new_fcd;
4271 }
4272 calling->cc = &VM_CC_ON_STACK(klass,
4273 vm_call_general,
4274 { .method_missing_reason = missing_reason },
4275 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4276
4277 if (flags & VM_CALL_FCALL) {
4278 return vm_call_method(ec, reg_cfp, calling);
4279 }
4280
4281 const struct rb_callcache *cc = calling->cc;
4282 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4283
4284 if (vm_cc_cme(cc) != NULL) {
4285 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4286 case METHOD_VISI_PUBLIC: /* likely */
4287 return vm_call_method_each_type(ec, reg_cfp, calling);
4288 case METHOD_VISI_PRIVATE:
4289 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4290 break;
4291 case METHOD_VISI_PROTECTED:
4292 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4293 break;
4294 default:
4295 VM_UNREACHABLE(vm_call_method);
4296 }
4297 return vm_call_method_missing(ec, reg_cfp, calling);
4298 }
4299
4300 return vm_call_method_nome(ec, reg_cfp, calling);
4301}
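
// Example (sketch): `obj.send("no_such_method")` reaches this function
// with a name that is not an interned ID, so mid becomes idMethodMissing.
// If method_missing is still the basic definition, a NoMethodError is
// raised directly so that no Symbol is created for the unknown name
// ([Feature #5112]); otherwise the name is prepended as the first argument
// of the user-defined method_missing.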
4302
4303static VALUE
4304vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4305{
4306 const struct rb_callinfo *ci = calling->cd->ci;
4307 int i;
4308 VALUE sym;
4309
4310 i = calling->argc - 1;
4311
4312 if (calling->argc == 0) {
4313 rb_raise(rb_eArgError, "no method name given");
4314 }
4315
4316 sym = TOPN(i);
4317 /* E.g. when i == 2
4318 *
4319 * | | | | TOPN
4320 * +------+ | |
4321 * | arg1 | ---+ | | 0
4322 * +------+ | +------+
4323 * | arg0 | -+ +-> | arg1 | 1
4324 * +------+ | +------+
4325 * | sym | +---> | arg0 | 2
4326 * +------+ +------+
4327 * | recv | | recv | 3
4328 * --+------+--------+------+------
4329 */
4330 /* shift arguments */
4331 if (i > 0) {
4332 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4333 }
4334 calling->argc -= 1;
4335 DEC_SP(1);
4336
4337 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4338}
4339
4340static VALUE
4341vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4342{
4343 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4344 const struct rb_callinfo *ci = calling->cd->ci;
4345 int flags = VM_CALL_FCALL;
4346 VALUE sym;
4347
4348 VALUE argv_ary;
4349 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4350 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4351 sym = rb_ary_shift(argv_ary);
4352 flags |= VM_CALL_ARGS_SPLAT;
4353 if (calling->kw_splat) {
4354 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4355 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4356 calling->kw_splat = 0;
4357 }
4358 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4359 }
4360
4361 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4362 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4363}
4364
4365static VALUE
4366vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4367{
4368 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4369 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4370}
4371
4372static VALUE
4373vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4374{
4375 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4376
4377 const struct rb_callinfo *ci = calling->cd->ci;
4378 int flags = vm_ci_flag(ci);
4379
4380 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4381 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4382 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4383 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4384 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4385 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4386 }
4387
4388 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4389 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4390}
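
// Rough dispatch rule (sketch): `obj.send(:m, 1, 2)` with plain positional
// arguments takes vm_call_opt_send_simple, which merely shifts the method
// name off the stack. Forwarding, splats, and keyword splats, e.g.
//
//   obj.send(:m, *args, **kw)
//
// take vm_call_opt_send_complex, which may rewrite the arguments through
// CALLER_SETUP_ARG first.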
4391
4392static VALUE
4393vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4394 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4395{
4396 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4397
4398 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4399 unsigned int argc, flag;
4400
4401 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4402 argc = ++calling->argc;
4403
4404 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4405 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4406 vm_check_canary(ec, reg_cfp->sp);
4407 if (argc > 1) {
4408 MEMMOVE(argv+1, argv, VALUE, argc-1);
4409 }
4410 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4411 INC_SP(1);
4412
4413 ec->method_missing_reason = reason;
4414
4415 struct rb_forwarding_call_data new_fcd = {
4416 .cd = {
4417 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4418 .cc = NULL,
4419 },
4420 .caller_ci = NULL,
4421 };
4422
4423 if (!(flag & VM_CALL_FORWARDING)) {
4424 calling->cd = &new_fcd.cd;
4425 }
4426 else {
4427 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4428 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4429 new_fcd.caller_ci = caller_ci;
4430 calling->cd = (struct rb_call_data *)&new_fcd;
4431 }
4432
4433 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4434 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4435 return vm_call_method(ec, reg_cfp, calling);
4436}
4437
4438static VALUE
4439vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4440{
4441 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4442}
4443
4444static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4445static VALUE
4446vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4447{
4448 klass = RCLASS_SUPER(klass);
4449
4450 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4451 if (cme == NULL) {
4452 return vm_call_method_nome(ec, cfp, calling);
4453 }
4454 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4455 cme->def->body.refined.orig_me) {
4456 cme = refined_method_callable_without_refinement(cme);
4457 }
4458
4459 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4460
4461 return vm_call_method_each_type(ec, cfp, calling);
4462}
4463
4464static inline VALUE
4465find_refinement(VALUE refinements, VALUE klass)
4466{
4467 if (NIL_P(refinements)) {
4468 return Qnil;
4469 }
4470 return rb_hash_lookup(refinements, klass);
4471}
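
// Example (sketch): given
//
//   module M
//     refine String do
//       def shout = upcase
//     end
//   end
//
// `using M` records { String => <refinement module> } in the cref's
// refinement table; find_refinement() performs that lookup for the method
// owner while search_refined_method() walks the cref chain.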
4472
4473PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4474static rb_control_frame_t *
4475current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4476{
4477 rb_control_frame_t *top_cfp = cfp;
4478
4479 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4480 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4481
4482 do {
4483 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4484 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4485 /* TODO: orphan block */
4486 return top_cfp;
4487 }
4488 } while (cfp->iseq != local_iseq);
4489 }
4490 return cfp;
4491}
4492
4493static const rb_callable_method_entry_t *
4494refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4495{
4496 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4497 const rb_callable_method_entry_t *cme;
4498
4499 if (orig_me->defined_class == 0) {
 4500 cme = NULL;
 4501 rb_notimplement();
4502 }
4503 else {
4504 cme = (const rb_callable_method_entry_t *)orig_me;
4505 }
4506
4507 VM_ASSERT(callable_method_entry_p(cme));
4508
4509 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4510 cme = NULL;
4511 }
4512
4513 return cme;
4514}
4515
4516static const rb_callable_method_entry_t *
4517search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4518{
4519 ID mid = vm_ci_mid(calling->cd->ci);
4520 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4521 const struct rb_callcache * const cc = calling->cc;
4522 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4523
4524 for (; cref; cref = CREF_NEXT(cref)) {
4525 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4526 if (NIL_P(refinement)) continue;
4527
4528 const rb_callable_method_entry_t *const ref_me =
4529 rb_callable_method_entry(refinement, mid);
4530
4531 if (ref_me) {
4532 if (vm_cc_call(cc) == vm_call_super_method) {
4533 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4534 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4535 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4536 continue;
4537 }
4538 }
4539
4540 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4541 cme->def != ref_me->def) {
4542 cme = ref_me;
4543 }
4544 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4545 return cme;
4546 }
4547 }
4548 else {
4549 return NULL;
4550 }
4551 }
4552
4553 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4554 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4555 }
4556 else {
4557 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4558 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4559 return cme;
4560 }
4561}
4562
4563static VALUE
4564vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4565{
4566 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4567
4568 if (ref_cme) {
4569 if (calling->cd->cc) {
4570 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4571 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4572 return vm_call_method(ec, cfp, calling);
4573 }
4574 else {
4575 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
 4576 calling->cc = ref_cc;
4577 return vm_call_method(ec, cfp, calling);
4578 }
4579 }
4580 else {
4581 return vm_call_method_nome(ec, cfp, calling);
4582 }
4583}
4584
4585static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4586
4587NOINLINE(static VALUE
4588 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4589 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4590
4591static VALUE
4592vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4593 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4594{
4595 int argc = calling->argc;
4596
4597 /* remove self */
4598 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4599 DEC_SP(1);
4600
4601 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4602}
4603
4604static VALUE
4605vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4606{
4607 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4608
4609 const struct rb_callinfo *ci = calling->cd->ci;
4610 VALUE procval = calling->recv;
4611 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4612}
4613
4614static VALUE
4615vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4616{
4617 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4618
4619 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4620 const struct rb_callinfo *ci = calling->cd->ci;
4621
4622 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4623 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4624 }
4625 else {
4626 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4627 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4628 return vm_call_general(ec, reg_cfp, calling);
4629 }
4630}
4631
4632static VALUE
4633vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4634{
4635 VALUE recv = calling->recv;
4636
4637 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4638 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4639 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4640
4641 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4642 return internal_RSTRUCT_GET(recv, off);
4643}
4644
4645static VALUE
4646vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4647{
4648 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4649
4650 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4651 reg_cfp->sp -= 1;
4652 return ret;
4653}
4654
4655static VALUE
4656vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4657{
4658 VALUE recv = calling->recv;
4659
4660 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4661 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4662 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4663
4664 rb_check_frozen(recv);
4665
4666 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4667 internal_RSTRUCT_SET(recv, off, val);
4668
4669 return val;
4670}
4671
4672static VALUE
4673vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4674{
4675 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4676
4677 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4678 reg_cfp->sp -= 2;
4679 return ret;
4680}
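
// Example (sketch): `Point = Struct.new(:x, :y)` defines optimized
// accessors: `p.x` dispatches to vm_call_opt_struct_aref and `p.x = 1` to
// vm_call_opt_struct_aset, each accessing the struct slot directly through
// the index stored in the optimized method definition.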
4681
4682NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4683 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4684
4685#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4686 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4687 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4688 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4689 var = func; \
4690 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4691 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4692 } \
4693 else { \
4694 nohook; \
4695 var = func; \
4696 }
4697
4698static VALUE
4699vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4700 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4701{
4702 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4703 case OPTIMIZED_METHOD_TYPE_SEND:
4704 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4705 return vm_call_opt_send(ec, cfp, calling);
4706 case OPTIMIZED_METHOD_TYPE_CALL:
4707 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4708 return vm_call_opt_call(ec, cfp, calling);
4709 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4710 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4711 return vm_call_opt_block_call(ec, cfp, calling);
4712 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4713 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4714 rb_check_arity(calling->argc, 0, 0);
4715
4716 VALUE v;
4717 VM_CALL_METHOD_ATTR(v,
4718 vm_call_opt_struct_aref(ec, cfp, calling),
4719 set_vm_cc_ivar(cc); \
4720 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4721 return v;
4722 }
4723 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4724 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4725 rb_check_arity(calling->argc, 1, 1);
4726
4727 VALUE v;
4728 VM_CALL_METHOD_ATTR(v,
4729 vm_call_opt_struct_aset(ec, cfp, calling),
4730 set_vm_cc_ivar(cc); \
4731 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4732 return v;
4733 }
4734 default:
4735 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4736 }
4737}
4738
4739static VALUE
4740vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4741{
4742 const struct rb_callinfo *ci = calling->cd->ci;
4743 const struct rb_callcache *cc = calling->cc;
4744 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4745 VALUE v;
4746
4747 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4748
4749 switch (cme->def->type) {
4750 case VM_METHOD_TYPE_ISEQ:
4751 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4752 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4753 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4754 }
4755 else {
4756 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4757 return vm_call_iseq_setup(ec, cfp, calling);
4758 }
4759
4760 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4761 case VM_METHOD_TYPE_CFUNC:
4762 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4763 return vm_call_cfunc(ec, cfp, calling);
4764
4765 case VM_METHOD_TYPE_ATTRSET:
4766 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4767
4768 rb_check_arity(calling->argc, 1, 1);
4769
4770 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4771
4772 if (vm_cc_markable(cc)) {
4773 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4774 VM_CALL_METHOD_ATTR(v,
4775 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4776 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4777 }
4778 else {
4779 cc = &((struct rb_callcache) {
4780 .flags = T_IMEMO |
4781 (imemo_callcache << FL_USHIFT) |
4782 VM_CALLCACHE_UNMARKABLE |
4783 VM_CALLCACHE_ON_STACK,
4784 .klass = cc->klass,
4785 .cme_ = cc->cme_,
4786 .call_ = cc->call_,
4787 .aux_ = {
4788 .attr = {
4789 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4790 }
4791 },
4792 });
4793
4794 VM_CALL_METHOD_ATTR(v,
4795 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4796 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4797 }
4798 return v;
4799
4800 case VM_METHOD_TYPE_IVAR:
4801 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4802 rb_check_arity(calling->argc, 0, 0);
4803 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4804 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4805 VM_CALL_METHOD_ATTR(v,
4806 vm_call_ivar(ec, cfp, calling),
4807 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4808 return v;
4809
4810 case VM_METHOD_TYPE_MISSING:
4811 vm_cc_method_missing_reason_set(cc, 0);
4812 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4813 return vm_call_method_missing(ec, cfp, calling);
4814
4815 case VM_METHOD_TYPE_BMETHOD:
4816 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4817 return vm_call_bmethod(ec, cfp, calling);
4818
4819 case VM_METHOD_TYPE_ALIAS:
4820 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4821 return vm_call_alias(ec, cfp, calling);
4822
4823 case VM_METHOD_TYPE_OPTIMIZED:
4824 return vm_call_optimized(ec, cfp, calling, ci, cc);
4825
4826 case VM_METHOD_TYPE_UNDEF:
4827 break;
4828
4829 case VM_METHOD_TYPE_ZSUPER:
4830 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4831
4832 case VM_METHOD_TYPE_REFINED:
4833 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4834 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4835 return vm_call_refined(ec, cfp, calling);
4836 }
4837
4838 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4839}
4840
4841NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4842
4843static VALUE
4844vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4845{
4846 /* method missing */
4847 const struct rb_callinfo *ci = calling->cd->ci;
4848 const int stat = ci_missing_reason(ci);
4849
4850 if (vm_ci_mid(ci) == idMethodMissing) {
4851 if (UNLIKELY(calling->heap_argv)) {
4852 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4853 }
4854 else {
4855 rb_control_frame_t *reg_cfp = cfp;
4856 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4857 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4858 }
4859 }
4860 else {
4861 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4862 }
4863}
4864
4865/* Protected method calls and super invocations need to check that the receiver
4866 * (self for super) inherits the module on which the method is defined.
 4867 * In the case of refinements, the check should consider the original class,
 4868 * not the refinement.
4869 */
4870static VALUE
4871vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4872{
4873 VALUE defined_class = me->defined_class;
4874 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4875 return NIL_P(refined_class) ? defined_class : refined_class;
4876}
4877
4878static inline VALUE
4879vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4880{
4881 const struct rb_callinfo *ci = calling->cd->ci;
4882 const struct rb_callcache *cc = calling->cc;
4883
4884 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4885
4886 if (vm_cc_cme(cc) != NULL) {
4887 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4888 case METHOD_VISI_PUBLIC: /* likely */
4889 return vm_call_method_each_type(ec, cfp, calling);
4890
4891 case METHOD_VISI_PRIVATE:
4892 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4893 enum method_missing_reason stat = MISSING_PRIVATE;
4894 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4895
4896 vm_cc_method_missing_reason_set(cc, stat);
4897 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4898 return vm_call_method_missing(ec, cfp, calling);
4899 }
4900 return vm_call_method_each_type(ec, cfp, calling);
4901
4902 case METHOD_VISI_PROTECTED:
4903 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4904 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4905 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4906 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4907 return vm_call_method_missing(ec, cfp, calling);
4908 }
4909 else {
4910 /* caching method info to dummy cc */
4911 VM_ASSERT(vm_cc_cme(cc) != NULL);
4912 struct rb_callcache cc_on_stack = *cc;
4913 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4914 calling->cc = &cc_on_stack;
4915 return vm_call_method_each_type(ec, cfp, calling);
4916 }
4917 }
4918 return vm_call_method_each_type(ec, cfp, calling);
4919
4920 default:
4921 rb_bug("unreachable");
4922 }
4923 }
4924 else {
4925 return vm_call_method_nome(ec, cfp, calling);
4926 }
4927}
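
// Example (sketch): with
//
//   class A
//     protected def secret = 42
//   end
//
// a top-level call `A.new.secret` fails the rb_obj_is_kind_of(cfp->self,
// defined_class) check above and is routed to method_missing with
// MISSING_PROTECTED, whereas the same call made from inside another
// instance method of A succeeds.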
4928
4929static VALUE
4930vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4931{
4932 RB_DEBUG_COUNTER_INC(ccf_general);
4933 return vm_call_method(ec, reg_cfp, calling);
4934}
4935
4936void
4937rb_vm_cc_general(const struct rb_callcache *cc)
4938{
4939 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4940 VM_ASSERT(cc != vm_cc_empty());
4941
4942 *(vm_call_handler *)&cc->call_ = vm_call_general;
4943}
4944
4945static VALUE
4946vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4947{
4948 RB_DEBUG_COUNTER_INC(ccf_super_method);
4949
 4950 // This line is introduced to keep this function distinct from `vm_call_general`, because some
 4951 // compilers (MSVC, in our experience) can merge identical functions, making their addresses the same.
 4952 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay different.
4953 if (ec == NULL) rb_bug("unreachable");
4954
 4955 /* this check is required to distinguish this function from other call handlers. */
4956 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4957 return vm_call_method(ec, reg_cfp, calling);
4958}
4959
4960/* super */
4961
4962static inline VALUE
4963vm_search_normal_superclass(VALUE klass)
4964{
4965 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4966 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4967 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4968 klass = RBASIC(klass)->klass;
4969 }
4970 klass = RCLASS_ORIGIN(klass);
4971 return RCLASS_SUPER(klass);
4972}
4973
4974NORETURN(static void vm_super_outside(void));
4975
4976static void
4977vm_super_outside(void)
4978{
4979 rb_raise(rb_eNoMethodError, "super called outside of method");
4980}
4981
4982static const struct rb_callcache *
4983empty_cc_for_super(void)
4984{
4985 return &vm_empty_cc_for_super;
4986}
4987
4988static const struct rb_callcache *
4989vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
4990{
4991 VALUE current_defined_class;
4992 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
4993
4994 if (!me) {
4995 vm_super_outside();
4996 }
4997
4998 current_defined_class = vm_defined_class_for_protected_call(me);
4999
5000 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5001 reg_cfp->iseq != method_entry_iseqptr(me) &&
5002 !rb_obj_is_kind_of(recv, current_defined_class)) {
5003 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5004 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5005
5006 if (m) { /* not bound UnboundMethod */
5007 rb_raise(rb_eTypeError,
5008 "self has wrong type to call super in this context: "
5009 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5010 rb_obj_class(recv), m);
5011 }
5012 }
5013
5014 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5015 rb_raise(rb_eRuntimeError,
5016 "implicit argument passing of super from method defined"
5017 " by define_method() is not supported."
5018 " Specify all arguments explicitly.");
5019 }
5020
5021 ID mid = me->def->original_id;
5022
5023 if (!vm_ci_markable(cd->ci)) {
5024 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5025 }
5026 else {
5027 // update iseq. really? (TODO)
5028 cd->ci = vm_ci_new_runtime(mid,
5029 vm_ci_flag(cd->ci),
5030 vm_ci_argc(cd->ci),
5031 vm_ci_kwarg(cd->ci));
5032
5033 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5034 }
5035
5036 const struct rb_callcache *cc;
5037
5038 VALUE klass = vm_search_normal_superclass(me->defined_class);
5039
5040 if (!klass) {
5041 /* bound instance method of module */
5042 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5043 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5044 }
5045 else {
5046 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5047 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5048
5049 // define_method can cache for different method id
5050 if (cached_cme == NULL) {
5051 // empty_cc_for_super is not markable object
5052 cd->cc = empty_cc_for_super();
5053 }
5054 else if (cached_cme->called_id != mid) {
5055 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5056 if (cme) {
5057 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5058 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5059 }
5060 else {
5061 cd->cc = cc = empty_cc_for_super();
5062 }
5063 }
5064 else {
5065 switch (cached_cme->def->type) {
5066 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5067 case VM_METHOD_TYPE_REFINED:
5068 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5069 case VM_METHOD_TYPE_ATTRSET:
5070 case VM_METHOD_TYPE_IVAR:
5071 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5072 break;
5073 default:
5074 break; // use fastpath
5075 }
5076 }
5077 }
5078
5079 VM_ASSERT((vm_cc_cme(cc), true));
5080
5081 return cc;
5082}
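
// Example (sketch): the bmethod/zsuper restriction above means
//
//   define_method(:m) { super }     # raises RuntimeError when called
//   define_method(:m) { super() }   # OK: arguments passed explicitly
//
// because a block-defined method has no parameter list from which the
// implicit `super` arguments could be reconstructed.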
5083
5084/* yield */
5085
5086static inline int
5087block_proc_is_lambda(const VALUE procval)
5088{
5089 rb_proc_t *proc;
5090
5091 if (procval) {
5092 GetProcPtr(procval, proc);
5093 return proc->is_lambda;
5094 }
5095 else {
5096 return 0;
5097 }
5098}
5099
5100static inline const rb_namespace_t *
5101block_proc_namespace(const VALUE procval)
5102{
5103 rb_proc_t *proc;
5104
5105 if (procval) {
5106 GetProcPtr(procval, proc);
5107 return proc->ns;
5108 }
5109 else {
5110 return NULL;
5111 }
5112}
5113
5114static VALUE
5115vm_yield_with_cfunc(rb_execution_context_t *ec,
5116 const struct rb_captured_block *captured,
5117 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
 5118 const rb_callable_method_entry_t *me)
5119{
5120 int is_lambda = FALSE; /* TODO */
5121 VALUE val, arg, blockarg;
5122 int frame_flag;
5123 const struct vm_ifunc *ifunc = captured->code.ifunc;
5124
5125 if (is_lambda) {
5126 arg = rb_ary_new4(argc, argv);
5127 }
5128 else if (argc == 0) {
5129 arg = Qnil;
5130 }
5131 else {
5132 arg = argv[0];
5133 }
5134
5135 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5136
5137 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5138 if (kw_splat) {
5139 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5140 }
5141
5142 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5143 frame_flag,
5144 self,
5145 VM_GUARDED_PREV_EP(captured->ep),
5146 (VALUE)me,
5147 0, ec->cfp->sp, 0, 0);
5148 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5149 rb_vm_pop_frame(ec);
5150
5151 return val;
5152}
5153
5154VALUE
5155rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5156{
5157 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5158}
5159
5160static VALUE
5161vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5162{
5163 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5164}
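
// Example (sketch): `["a", "b"].map(&:upcase)` yields each element to a
// Symbol block handler; the helper above forwards to rb_sym_proc_call,
// which sends :upcase to the first yielded value.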
5165
5166static inline int
5167vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5168{
5169 int i;
5170 long len = RARRAY_LEN(ary);
5171
5172 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5173
5174 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5175 argv[i] = RARRAY_AREF(ary, i);
5176 }
5177
5178 return i;
5179}
5180
5181static inline VALUE
5182vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5183{
5184 VALUE ary, arg0 = argv[0];
5185 ary = rb_check_array_type(arg0);
5186#if 0
5187 argv[0] = arg0;
5188#else
5189 VM_ASSERT(argv[0] == arg0);
5190#endif
5191 return ary;
5192}
5193
5194static int
5195vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5196{
5197 if (rb_simple_iseq_p(iseq)) {
5198 rb_control_frame_t *cfp = ec->cfp;
5199 VALUE arg0;
5200
5201 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5202
5203 if (arg_setup_type == arg_setup_block &&
5204 calling->argc == 1 &&
5205 ISEQ_BODY(iseq)->param.flags.has_lead &&
5206 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5207 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5208 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5209 }
5210
5211 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5212 if (arg_setup_type == arg_setup_block) {
5213 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5214 int i;
5215 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5216 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5217 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5218 }
5219 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5220 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5221 }
5222 }
5223 else {
5224 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5225 }
5226 }
5227
5228 return 0;
5229 }
5230 else {
5231 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5232 }
5233}
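
// Example (sketch): non-lambda block argument semantics handled above:
//
//   [[1, 2, 3]].each { |a, b| }   # arg0 splat: a = 1, b = 2 (3 dropped)
//   [1].each       { |a, b| }     # padded: a = 1, b = nil
//
// whereas a lambda (arg_setup_method) raises an ArgumentError on arity
// mismatch via argument_arity_error.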
5234
5235static int
5236vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5237{
5238 struct rb_calling_info calling_entry, *calling;
5239
5240 calling = &calling_entry;
5241 calling->argc = argc;
5242 calling->block_handler = block_handler;
5243 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5244 calling->recv = Qundef;
5245 calling->heap_argv = 0;
5246 calling->cc = NULL;
5247 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5248
5249 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5250}
5251
5252/* ruby iseq -> ruby block */
5253
5254static VALUE
5255vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5256 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5257 bool is_lambda, VALUE block_handler)
5258{
5259 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5260 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5261 const int arg_size = ISEQ_BODY(iseq)->param.size;
5262 VALUE * const rsp = GET_SP() - calling->argc;
5263 VALUE * const argv = rsp;
5264 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5265 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5266
5267 SET_SP(rsp);
5268
5269 if (calling->proc_ns) {
5270 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5271 }
5272
5273 vm_push_frame(ec, iseq,
5274 frame_flag,
5275 captured->self,
5276 VM_GUARDED_PREV_EP(captured->ep), 0,
5277 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5278 rsp + arg_size,
5279 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5280
5281 return Qundef;
5282}
5283
5284static VALUE
5285vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5286 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5287 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5288{
5289 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5290 int flags = vm_ci_flag(ci);
5291
5292 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5293 ((calling->argc == 0) ||
5294 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5295 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5296 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5297 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5298 flags = 0;
5299 if (UNLIKELY(calling->heap_argv)) {
5300#if VM_ARGC_STACK_MAX < 0
5301 if (RARRAY_LEN(calling->heap_argv) < 1) {
5302 rb_raise(rb_eArgError, "no receiver given");
5303 }
5304#endif
5305 calling->recv = rb_ary_shift(calling->heap_argv);
5306 // Modify stack to avoid cfp consistency error
5307 reg_cfp->sp++;
5308 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5309 reg_cfp->sp[-2] = calling->recv;
5310 flags |= VM_CALL_ARGS_SPLAT;
5311 }
5312 else {
5313 if (calling->argc < 1) {
5314 rb_raise(rb_eArgError, "no receiver given");
5315 }
5316 calling->recv = TOPN(--calling->argc);
5317 }
5318 if (calling->kw_splat) {
5319 flags |= VM_CALL_KW_SPLAT;
5320 }
5321 }
5322 else {
5323 if (calling->argc < 1) {
5324 rb_raise(rb_eArgError, "no receiver given");
5325 }
5326 calling->recv = TOPN(--calling->argc);
5327 }
5328
5329 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5330}
5331
5332static VALUE
5333vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5334 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5335 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5336{
5337 VALUE val;
5338 int argc;
5339 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5340 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5341 argc = calling->argc;
5342 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
 5343 POPN(argc); /* TODO: should this be done before the C call/yield? */
5344 return val;
5345}
5346
5347static VALUE
5348vm_proc_to_block_handler(VALUE procval)
5349{
5350 const struct rb_block *block = vm_proc_block(procval);
5351
5352 switch (vm_block_type(block)) {
5353 case block_type_iseq:
5354 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5355 case block_type_ifunc:
5356 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5357 case block_type_symbol:
5358 return VM_BH_FROM_SYMBOL(block->as.symbol);
5359 case block_type_proc:
5360 return VM_BH_FROM_PROC(block->as.proc);
5361 }
5362 VM_UNREACHABLE(vm_yield_with_proc);
5363 return Qundef;
5364}
5365
5366static VALUE
5367vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5368 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5369 bool is_lambda, VALUE block_handler)
5370{
5371 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5372 VALUE proc = VM_BH_TO_PROC(block_handler);
5373 if (!calling->proc_ns) {
5374 calling->proc_ns = block_proc_namespace(proc);
5375 }
5376 is_lambda = block_proc_is_lambda(proc);
5377 block_handler = vm_proc_to_block_handler(proc);
5378 }
5379
5380 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5381}
5382
5383static inline VALUE
5384vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5385 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5386 bool is_lambda, VALUE block_handler)
5387{
5388 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5389 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5390 bool is_lambda, VALUE block_handler);
5391
5392 switch (vm_block_handler_type(block_handler)) {
5393 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5394 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5395 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5396 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5397 default: rb_bug("vm_invoke_block: unreachable");
5398 }
5399
5400 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5401}
5402
5403static VALUE
5404vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5405{
5406 const rb_execution_context_t *ec = GET_EC();
5407 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5408 struct rb_captured_block *captured;
5409
5410 if (cfp == 0) {
5411 rb_bug("vm_make_proc_with_iseq: unreachable");
5412 }
5413
5414 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5415 captured->code.iseq = blockiseq;
5416
5417 return rb_vm_make_proc(ec, captured, rb_cProc);
5418}
5419
5420static VALUE
5421vm_once_exec(VALUE iseq)
5422{
5423 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5424 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5425}
5426
5427static VALUE
5428vm_once_clear(VALUE data)
5429{
5430 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5431 is->once.running_thread = NULL;
5432 return Qnil;
5433}
5434
5435/* defined insn */
5436
5437static bool
5438check_respond_to_missing(VALUE obj, VALUE v)
5439{
5440 VALUE args[2];
5441 VALUE r;
5442
5443 args[0] = obj; args[1] = Qfalse;
5444 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5445 if (!UNDEF_P(r) && RTEST(r)) {
5446 return true;
5447 }
5448 else {
5449 return false;
5450 }
5451}
5452
5453static bool
5454vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5455{
5456 VALUE klass;
5457 enum defined_type type = (enum defined_type)op_type;
5458
5459 switch (type) {
5460 case DEFINED_IVAR:
5461 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5462 break;
5463 case DEFINED_GVAR:
5464 return rb_gvar_defined(SYM2ID(obj));
5465 break;
5466 case DEFINED_CVAR: {
5467 const rb_cref_t *cref = vm_get_cref(GET_EP());
5468 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5469 return rb_cvar_defined(klass, SYM2ID(obj));
5470 break;
5471 }
5472 case DEFINED_CONST:
5473 case DEFINED_CONST_FROM: {
5474 bool allow_nil = type == DEFINED_CONST;
5475 klass = v;
5476 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5477 break;
5478 }
5479 case DEFINED_FUNC:
5480 klass = CLASS_OF(v);
5481 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5482 break;
5483 case DEFINED_METHOD:{
5484 VALUE klass = CLASS_OF(v);
5485 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5486
5487 if (me) {
5488 switch (METHOD_ENTRY_VISI(me)) {
5489 case METHOD_VISI_PRIVATE:
5490 break;
5491 case METHOD_VISI_PROTECTED:
5492 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5493 break;
5494 }
5495 case METHOD_VISI_PUBLIC:
5496 return true;
5497 break;
5498 default:
5499 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5500 }
5501 }
5502 else {
5503 return check_respond_to_missing(obj, v);
5504 }
5505 break;
5506 }
5507 case DEFINED_YIELD:
5508 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5509 return true;
5510 }
5511 break;
5512 case DEFINED_ZSUPER:
5513 {
5514 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5515
5516 if (me) {
5517 VALUE klass = vm_search_normal_superclass(me->defined_class);
5518 if (!klass) return false;
5519
5520 ID id = me->def->original_id;
5521
5522 return rb_method_boundp(klass, id, 0);
5523 }
5524 }
5525 break;
5526 case DEFINED_REF:
5527 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5528 default:
5529 rb_bug("unimplemented defined? type (VM)");
5530 break;
5531 }
5532
5533 return false;
5534}
5535
5536bool
5537rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5538{
5539 return vm_defined(ec, reg_cfp, op_type, obj, v);
5540}
5541
5542static const VALUE *
5543vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5544{
5545 rb_num_t i;
5546 const VALUE *ep = reg_ep;
5547 for (i = 0; i < lv; i++) {
5548 ep = GET_PREV_EP(ep);
5549 }
5550 return ep;
5551}
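
/* [added commentary, not part of vm_insnhelper.c] Each env frame links to
 * its lexically enclosing frame, so vm_get_ep(GET_EP(), 0) is the current
 * scope and vm_get_ep(GET_EP(), 2) is two block scopes up. The `level`
 * operand of getlocal/setlocal encodes exactly this hop count. */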
5552
5553static VALUE
5554vm_get_special_object(const VALUE *const reg_ep,
5555 enum vm_special_object_type type)
5556{
5557 switch (type) {
5558 case VM_SPECIAL_OBJECT_VMCORE:
5559 return rb_mRubyVMFrozenCore;
5560 case VM_SPECIAL_OBJECT_CBASE:
5561 return vm_get_cbase(reg_ep);
5562 case VM_SPECIAL_OBJECT_CONST_BASE:
5563 return vm_get_const_base(reg_ep);
5564 default:
5565 rb_bug("putspecialobject insn: unknown value_type %d", type);
5566 }
5567}
5568
5569// The ZJIT implementation uses this C function
5570// and needs to call a non-static function
5571VALUE
5572rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5573{
5574 return vm_get_special_object(reg_ep, type);
5575}
5576
5577static VALUE
5578vm_concat_array(VALUE ary1, VALUE ary2st)
5579{
5580 const VALUE ary2 = ary2st;
5581 VALUE tmp1 = rb_check_to_array(ary1);
5582 VALUE tmp2 = rb_check_to_array(ary2);
5583
5584 if (NIL_P(tmp1)) {
5585 tmp1 = rb_ary_new3(1, ary1);
5586 }
5587 if (tmp1 == ary1) {
5588 tmp1 = rb_ary_dup(ary1);
5589 }
5590
5591 if (NIL_P(tmp2)) {
5592 return rb_ary_push(tmp1, ary2);
5593 }
5594 else {
5595 return rb_ary_concat(tmp1, tmp2);
5596 }
5597}
5598
5599static VALUE
5600vm_concat_to_array(VALUE ary1, VALUE ary2st)
5601{
5602 /* ary1 must be a newly created array */
5603 const VALUE ary2 = ary2st;
5604
5605 if (NIL_P(ary2)) return ary1;
5606
5607 VALUE tmp2 = rb_check_to_array(ary2);
5608
5609 if (NIL_P(tmp2)) {
5610 return rb_ary_push(ary1, ary2);
5611 }
5612 else {
5613 return rb_ary_concat(ary1, tmp2);
5614 }
5615}
5616
5617// The YJIT implementation uses this C function
5618// and needs to call a non-static function
5619VALUE
5620rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5621{
5622 return vm_concat_array(ary1, ary2st);
5623}
5624
5625VALUE
5626rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5627{
5628 return vm_concat_to_array(ary1, ary2st);
5629}
5630
5631static VALUE
5632vm_splat_array(VALUE flag, VALUE ary)
5633{
5634 if (NIL_P(ary)) {
5635 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5636 }
5637 VALUE tmp = rb_check_to_array(ary);
5638 if (NIL_P(tmp)) {
5639 return rb_ary_new3(1, ary);
5640 }
5641 else if (RTEST(flag)) {
5642 return rb_ary_dup(tmp);
5643 }
5644 else {
5645 return tmp;
5646 }
5647}
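
/* [added commentary] Worked examples of the branches above:
 *   vm_splat_array(Qtrue,  Qnil)   => []                     (fresh, mutable)
 *   vm_splat_array(Qfalse, Qnil)   => rb_cArray_empty_frozen (shared)
 *   vm_splat_array(Qtrue,  [1,2])  => [1, 2].dup
 *   vm_splat_array(Qfalse, [1,2])  => [1, 2]                 (reused as-is)
 *   vm_splat_array(flag,   obj)    => [obj] if obj has no #to_a
 * The compiler sets `flag` when the splat result may later be mutated. */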
5648
5649// The YJIT implementation uses this C function
5650// and needs to call a non-static function
5651VALUE
5652rb_vm_splat_array(VALUE flag, VALUE ary)
5653{
5654 return vm_splat_array(flag, ary);
5655}
5656
5657static VALUE
5658vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5659{
5660 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5661
5662 if (flag & VM_CHECKMATCH_ARRAY) {
5663 long i;
5664 const long n = RARRAY_LEN(pattern);
5665
5666 for (i = 0; i < n; i++) {
5667 VALUE v = RARRAY_AREF(pattern, i);
5668 VALUE c = check_match(ec, v, target, type);
5669
5670 if (RTEST(c)) {
5671 return c;
5672 }
5673 }
5674 return Qfalse;
5675 }
5676 else {
5677 return check_match(ec, pattern, target, type);
5678 }
5679}
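
/* [added commentary] VM_CHECKMATCH_ARRAY is set for splatted patterns such
 * as `case x; when *candidates`, where each element of `candidates` is
 * tried with #=== (via check_match) until one returns truthy. */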
5680
5681VALUE
5682rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5683{
5684 return vm_check_match(ec, target, pattern, flag);
5685}
5686
5687static VALUE
5688vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5689{
5690 const VALUE kw_bits = *(ep - bits);
5691
5692 if (FIXNUM_P(kw_bits)) {
5693 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5694 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5695 return Qfalse;
5696 }
5697 else {
5698 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5699 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5700 }
5701 return Qtrue;
5702}
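
/* [added commentary] Sketch of the checkkeyword contract: kw_bits records
 * which optional keywords were passed explicitly (bit idx set, or key idx
 * present in the hash form). Qtrue means "keyword not given, evaluate the
 * default expression":
 *   def m(k: expensive_default) ... end
 *   m(k: 1)  # => Qfalse here, default skipped
 *   m        # => Qtrue here, default evaluated */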
5703
5704static void
5705vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5706{
5707 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5708 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5709 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5710 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5711
5712 switch (flag) {
5713 case RUBY_EVENT_CALL:
5714 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5715 return;
5716 case RUBY_EVENT_C_CALL:
5717 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5718 return;
5719 case RUBY_EVENT_RETURN:
5720 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5721 return;
5722 case RUBY_EVENT_C_RETURN:
5723 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5724 return;
5725 }
5726 }
5727}
5728
5729static VALUE
5730vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5731{
5732 if (!rb_const_defined_at(cbase, id)) {
5733 return 0;
5734 }
5735 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5736 return rb_public_const_get_at(cbase, id);
5737 }
5738 else {
5739 return rb_const_get_at(cbase, id);
5740 }
5741}
5742
5743static VALUE
5744vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5745{
5746 if (!RB_TYPE_P(klass, T_CLASS)) {
5747 return 0;
5748 }
5749 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5750 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5751
5752 if (tmp != super) {
5753 rb_raise(rb_eTypeError,
5754 "superclass mismatch for class %"PRIsVALUE"",
5755 rb_id2str(id));
5756 }
5757 else {
5758 return klass;
5759 }
5760 }
5761 else {
5762 return klass;
5763 }
5764}
5765
5766static VALUE
5767vm_check_if_module(ID id, VALUE mod)
5768{
5769 if (!RB_TYPE_P(mod, T_MODULE)) {
5770 return 0;
5771 }
5772 else {
5773 return mod;
5774 }
5775}
5776
5777static VALUE
5778declare_under(ID id, VALUE cbase, VALUE c)
5779{
5780 rb_set_class_path_string(c, cbase, rb_id2str(id));
5781 rb_const_set(cbase, id, c);
5782 return c;
5783}
5784
5785static VALUE
5786vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5787{
5788 /* new class declaration */
5789 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5790 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5791 rb_define_alloc_func(c, rb_get_alloc_func(c));
5792 rb_class_inherited(s, c);
5793 return c;
5794}
5795
5796static VALUE
5797vm_declare_module(ID id, VALUE cbase)
5798{
5799 /* new module declaration */
5800 return declare_under(id, cbase, rb_module_new());
5801}
5802
5803NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5804static void
5805unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5806{
5807 VALUE name = rb_id2str(id);
5808 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5809 name, type);
5810 VALUE location = rb_const_source_location_at(cbase, id);
5811 if (!NIL_P(location)) {
5812 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5813 " previous definition of %"PRIsVALUE" was here",
5814 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5815 }
5816 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5817}
5818
5819static VALUE
5820vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5821{
5822 VALUE klass;
5823
5824 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5825 rb_raise(rb_eTypeError,
5826 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5827 rb_obj_class(super));
5828 }
5829
5830 vm_check_if_namespace(cbase);
5831
5832 /* find klass */
5833 rb_autoload_load(cbase, id);
5834
5835 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5836 if (!vm_check_if_class(id, flags, super, klass))
5837 unmatched_redefinition("class", cbase, id, klass);
5838 return klass;
5839 }
5840 else {
5841 return vm_declare_class(id, flags, cbase, super);
5842 }
5843}
5844
5845static VALUE
5846vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5847{
5848 VALUE mod;
5849
5850 vm_check_if_namespace(cbase);
5851 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5852 if (!vm_check_if_module(id, mod))
5853 unmatched_redefinition("module", cbase, id, mod);
5854 return mod;
5855 }
5856 else {
5857 return vm_declare_module(id, cbase);
5858 }
5859}
5860
5861static VALUE
5862vm_find_or_create_class_by_id(ID id,
5863 rb_num_t flags,
5864 VALUE cbase,
5865 VALUE super)
5866{
5867 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5868
5869 switch (type) {
5870 case VM_DEFINECLASS_TYPE_CLASS:
5871 /* classdef returns class scope value */
5872 return vm_define_class(id, flags, cbase, super);
5873
5874 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5875 /* classdef returns class scope value */
5876 return rb_singleton_class(cbase);
5877
5878 case VM_DEFINECLASS_TYPE_MODULE:
5879 /* classdef returns class scope value */
5880 return vm_define_module(id, flags, cbase);
5881
5882 default:
5883 rb_bug("unknown defineclass type: %d", (int)type);
5884 }
5885}
5886
5887static rb_method_visibility_t
5888vm_scope_visibility_get(const rb_execution_context_t *ec)
5889{
5890 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5891
5892 if (!vm_env_cref_by_cref(cfp->ep)) {
5893 return METHOD_VISI_PUBLIC;
5894 }
5895 else {
5896 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5897 }
5898}
5899
5900static int
5901vm_scope_module_func_check(const rb_execution_context_t *ec)
5902{
5903 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5904
5905 if (!vm_env_cref_by_cref(cfp->ep)) {
5906 return FALSE;
5907 }
5908 else {
5909 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5910 }
5911}
5912
5913static void
5914vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5915{
5916 VALUE klass;
5917 rb_method_visibility_t visi;
5918 rb_cref_t *cref = vm_ec_cref(ec);
5919
5920 if (is_singleton) {
5921 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5922 visi = METHOD_VISI_PUBLIC;
5923 }
5924 else {
5925 klass = CREF_CLASS_FOR_DEFINITION(cref);
5926 visi = vm_scope_visibility_get(ec);
5927 }
5928
5929 if (NIL_P(klass)) {
5930 rb_raise(rb_eTypeError, "no class/module to add method");
5931 }
5932
5933 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5934 // Set max_iv_count on klasses based on the number of ivar sets in the initialize method
5935 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5936 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
5937 }
5938
5939 if (!is_singleton && vm_scope_module_func_check(ec)) {
5940 klass = rb_singleton_class(klass);
5941 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5942 }
5943}
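
/* [added commentary] Example of the module_function branch above:
 *   module M
 *     module_function
 *     def helper; end
 *   end
 * installs `helper` twice, as a private instance method of M and as a
 * public singleton method of M, which is what module_function promises. */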
5944
5945static VALUE
5946vm_invokeblock_i(struct rb_execution_context_struct *ec,
5947 struct rb_control_frame_struct *reg_cfp,
5948 struct rb_calling_info *calling)
5949{
5950 const struct rb_callinfo *ci = calling->cd->ci;
5951 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5952
5953 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5954 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5955 }
5956 else {
5957 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5958 }
5959}
5960
5961enum method_explorer_type {
5962 mexp_search_method,
5963 mexp_search_invokeblock,
5964 mexp_search_super,
5965};
5966
5967static inline VALUE
5968vm_sendish(
5969 struct rb_execution_context_struct *ec,
5970 struct rb_control_frame_struct *reg_cfp,
5971 struct rb_call_data *cd,
5972 VALUE block_handler,
5973 enum method_explorer_type method_explorer
5974) {
5975 VALUE val = Qundef;
5976 const struct rb_callinfo *ci = cd->ci;
5977 const struct rb_callcache *cc;
5978 int argc = vm_ci_argc(ci);
5979 VALUE recv = TOPN(argc);
5980 struct rb_calling_info calling = {
5981 .block_handler = block_handler,
5982 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5983 .recv = recv,
5984 .argc = argc,
5985 .cd = cd,
5986 };
5987
5988 switch (method_explorer) {
5989 case mexp_search_method:
5990 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5991 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5992 break;
5993 case mexp_search_super:
5994 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5995 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5996 break;
5997 case mexp_search_invokeblock:
5998 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5999 break;
6000 }
6001 return val;
6002}
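
/* [added commentary] The three explorers above map onto source forms:
 * mexp_search_method for `recv.m(...)` (lookup through the inline-cache
 * fast path), mexp_search_super for `super` (lookup relative to the current
 * frame's defined class), and mexp_search_invokeblock for `yield` (no
 * lookup at all, just the frame's block handler). */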
6003
6004VALUE
6005rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6006{
6007 stack_check(ec);
6008
6009 struct rb_forwarding_call_data adjusted_cd;
6010 struct rb_callinfo adjusted_ci;
6011
6012 VALUE bh;
6013 VALUE val;
6014
6015 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6016 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6017
6018 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6019
6020 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6021 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6022 }
6023 }
6024 else {
6025 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6026 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6027 }
6028
6029 VM_EXEC(ec, val);
6030 return val;
6031}
6032
6033VALUE
6034rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6035{
6036 stack_check(ec);
6037 VALUE bh = VM_BLOCK_HANDLER_NONE;
6038 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6039 VM_EXEC(ec, val);
6040 return val;
6041}
6042
6043VALUE
6044rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6045{
6046 stack_check(ec);
6047 struct rb_forwarding_call_data adjusted_cd;
6048 struct rb_callinfo adjusted_ci;
6049
6050 VALUE bh;
6051 VALUE val;
6052
6053 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6054 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6055
6056 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6057
6058 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6059 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6060 }
6061 }
6062 else {
6063 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6064 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6065 }
6066
6067 VM_EXEC(ec, val);
6068 return val;
6069}
6070
6071VALUE
6072rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6073{
6074 stack_check(ec);
6075 VALUE bh = VM_BLOCK_HANDLER_NONE;
6076 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6077 VM_EXEC(ec, val);
6078 return val;
6079}
6080
6081/* object.c */
6082VALUE rb_nil_to_s(VALUE);
6083VALUE rb_true_to_s(VALUE);
6084VALUE rb_false_to_s(VALUE);
6085/* numeric.c */
6086VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6087VALUE rb_fix_to_s(VALUE);
6088/* variable.c */
6089VALUE rb_mod_to_s(VALUE);
6090VALUE rb_mod_name(VALUE);
6091
6092static VALUE
6093vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6094{
6095 int type = TYPE(recv);
6096 if (type == T_STRING) {
6097 return recv;
6098 }
6099
6100 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6101
6102 switch (type) {
6103 case T_SYMBOL:
6104 if (check_method_basic_definition(vm_cc_cme(cc))) {
6105 // rb_sym_to_s() allocates a mutable string, but since we are only
6106 // going to use this string for interpolation, it's fine to use the
6107 // frozen string.
6108 return rb_sym2str(recv);
6109 }
6110 break;
6111 case T_MODULE:
6112 case T_CLASS:
6113 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6114 // rb_mod_to_s() allocates a mutable string, but since we are only
6115 // going to use this string for interpolation, it's fine to use the
6116 // frozen string.
6117 VALUE val = rb_mod_name(recv);
6118 if (NIL_P(val)) {
6119 val = rb_mod_to_s(recv);
6120 }
6121 return val;
6122 }
6123 break;
6124 case T_NIL:
6125 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6126 return rb_nil_to_s(recv);
6127 }
6128 break;
6129 case T_TRUE:
6130 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6131 return rb_true_to_s(recv);
6132 }
6133 break;
6134 case T_FALSE:
6135 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6136 return rb_false_to_s(recv);
6137 }
6138 break;
6139 case T_FIXNUM:
6140 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6141 return rb_fix_to_s(recv);
6142 }
6143 break;
6144 }
6145 return Qundef;
6146}
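
/* [added commentary] objtostring backs string interpolation. For "#{recv}",
 * a receiver whose #to_s is still the default C implementation is converted
 * without a dynamic call, e.g. "#{:sym}" uses rb_sym2str() and "#{42}" uses
 * rb_fix_to_s(); any redefined #to_s makes this return Qundef and fall back
 * to a regular method dispatch. */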
6147
6148static VALUE
6149vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6150{
6151 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6152 return ary;
6153 }
6154 else {
6155 return Qundef;
6156 }
6157}
6158
6159static VALUE
6160vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6161{
6162 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6163 return hash;
6164 }
6165 else {
6166 return Qundef;
6167 }
6168}
6169
6170static VALUE
6171vm_opt_str_freeze(VALUE str, int bop, ID id)
6172{
6173 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6174 return str;
6175 }
6176 else {
6177 return Qundef;
6178 }
6179}
6180
6181/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6182#define id_cmp idCmp
6183
6184static VALUE
6185vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6186{
6187 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6188 return rb_ary_includes(ary, target);
6189 }
6190 else {
6191 VALUE args[1] = {target};
6192
6193 // duparray
6194 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6195 VALUE dupary = rb_ary_resurrect(ary);
6196
6197 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6198 }
6199}
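
/* [added commentary] Sketch of the trade-off above: for code like
 * `[1, 2].include?(x)` the compiler fuses duparray with include?. While
 * Array#include? is unredefined, the literal array operand is probed
 * directly and no dup happens; otherwise the dup (and the DTRACE allocation
 * hook duparray would have fired) is replayed so a redefined or refined
 * #include? observes a fresh array. */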
6200
6201VALUE
6202rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6203{
6204 return vm_opt_duparray_include_p(ec, ary, target);
6205}
6206
6207static VALUE
6208vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6209{
6210 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6211 if (num == 0) {
6212 return Qnil;
6213 }
6214 else {
6215 VALUE result = *ptr;
6216 rb_snum_t i = num - 1;
6217 while (i-- > 0) {
6218 const VALUE v = *++ptr;
6219 if (OPTIMIZED_CMP(v, result) > 0) {
6220 result = v;
6221 }
6222 }
6223 return result;
6224 }
6225 }
6226 else {
6227 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6228 }
6229}
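
/* [added commentary] Effect of the fast path above: for `[a, b, c].max`
 * with Array#max unredefined, the operands are reduced directly from the VM
 * stack (ptr .. ptr + num - 1) and no Array is allocated.
 * vm_opt_newarray_min() below is the mirror image. */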
6230
6231VALUE
6232rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6233{
6234 return vm_opt_newarray_max(ec, num, ptr);
6235}
6236
6237static VALUE
6238vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6239{
6240 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6241 if (num == 0) {
6242 return Qnil;
6243 }
6244 else {
6245 VALUE result = *ptr;
6246 rb_snum_t i = num - 1;
6247 while (i-- > 0) {
6248 const VALUE v = *++ptr;
6249 if (OPTIMIZED_CMP(v, result) < 0) {
6250 result = v;
6251 }
6252 }
6253 return result;
6254 }
6255 }
6256 else {
6257 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6258 }
6259}
6260
6261VALUE
6262rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6263{
6264 return vm_opt_newarray_min(ec, num, ptr);
6265}
6266
6267static VALUE
6268vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6269{
6270 // If Array#hash is _not_ monkeypatched, use the optimized call
6271 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6272 return rb_ary_hash_values(num, ptr);
6273 }
6274 else {
6275 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6276 }
6277}
6278
6279VALUE
6280rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6281{
6282 return vm_opt_newarray_hash(ec, num, ptr);
6283}
6284
6285VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6286VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6287
6288static VALUE
6289vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6290{
6291 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6292 struct RArray fake_ary;
6293 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6294 return rb_ary_includes(ary, target);
6295 }
6296 else {
6297 VALUE args[1] = {target};
6298 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6299 }
6300}
6301
6302VALUE
6303rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6304{
6305 return vm_opt_newarray_include_p(ec, num, ptr, target);
6306}
6307
6308static VALUE
6309vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6310{
6311 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6312 struct RArray fake_ary;
6313 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6314 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6315 }
6316 else {
6317 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6318 // Set up an args array with room for the keyword hash.
6319 VALUE args[2];
6320 args[0] = fmt;
6321 int kw_splat = RB_NO_KEYWORDS;
6322 int argc = 1;
6323
6324 if (!UNDEF_P(buffer)) {
6325 args[1] = rb_hash_new_with_size(1);
6326 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6327 kw_splat = RB_PASS_KEYWORDS;
6328 argc++;
6329 }
6330
6331 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6332 }
6333}
6334
6335VALUE
6336rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6337{
6338 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6339}
6340
6341VALUE
6342rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6343{
6344 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6345}
6346
6347#undef id_cmp
6348
6349static void
6350vm_track_constant_cache(ID id, void *ic)
6351{
6352 rb_vm_t *vm = GET_VM();
6353 struct rb_id_table *const_cache = vm->constant_cache;
6354 VALUE lookup_result;
6355 set_table *ics;
6356
6357 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6358 ics = (set_table *)lookup_result;
6359 }
6360 else {
6361 ics = set_init_numtable();
6362 rb_id_table_insert(const_cache, id, (VALUE)ics);
6363 }
6364
6365 /* The call below to set_insert could allocate, which could trigger a GC.
6366 * If it triggers a GC, it may free an iseq that also holds a cache to this
6367 * constant. If that iseq is the last iseq with a cache to this constant, then
6368 * it will free this set table, which would cause a use-after-free during this
6369 * set_insert.
6370 *
6371 * So to fix this issue, we store the ID that is currently being inserted
6372 * and, in remove_from_constant_cache, we don't free the ST table for ID
6373 * equal to this one.
6374 *
6375 * See [Bug #20921].
6376 */
6377 vm->inserting_constant_cache_id = id;
6378
6379 set_insert(ics, (st_data_t)ic);
6380
6381 vm->inserting_constant_cache_id = (ID)0;
6382}
6383
6384static void
6385vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6386{
6387 RB_VM_LOCKING() {
6388 for (int i = 0; segments[i]; i++) {
6389 ID id = segments[i];
6390 if (id == idNULL) continue;
6391 vm_track_constant_cache(id, ic);
6392 }
6393 }
6394}
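
/* [added commentary] Illustrative example: for `A::B::C` the 0-terminated
 * segments array is roughly { idA, idB, idC, 0 }, so this inline cache is
 * registered under each of A, B and C, and a later redefinition of any
 * segment can find and invalidate every IC that depended on it. */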
6395
6396// For JIT inlining
6397static inline bool
6398vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6399{
6400 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6401 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6402
6403 return (ic_cref == NULL || // no need to check CREF
6404 ic_cref == vm_get_cref(reg_ep));
6405 }
6406 return false;
6407}
6408
6409static bool
6410vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6411{
6412 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6413 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6414}
6415
6416// YJIT needs this function to never allocate and never raise
6417bool
6418rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6419{
6420 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6421}
6422
6423static void
6424vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6425{
6426 if (ruby_vm_const_missing_count > 0) {
6427 ruby_vm_const_missing_count = 0;
6428 ic->entry = NULL;
6429 return;
6430 }
6431
6432 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6433 RB_OBJ_WRITE(ice, &ice->value, val);
6434 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6435 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6436 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6437
6438 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6439 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6440 rb_yjit_constant_ic_update(iseq, ic, pos);
6441}
6442
6443VALUE
6444rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6445{
6446 VALUE val;
6447 const ID *segments = ic->segments;
6448 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6449 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6450 val = ice->value;
6451
6452 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6453 }
6454 else {
6455 ruby_vm_constant_cache_misses++;
6456 val = vm_get_ev_const_chain(ec, segments);
6457 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6458 // Undo the PC increment to get the address of this instruction
6459 // INSN_ATTR(width) == 2
6460 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6461 }
6462 return val;
6463}
6464
6465static VALUE
6466vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6467{
6468 rb_thread_t *th = rb_ec_thread_ptr(ec);
6469 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6470
6471 again:
6472 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6473 return is->once.value;
6474 }
6475 else if (is->once.running_thread == NULL) {
6476 VALUE val;
6477 is->once.running_thread = th;
6478 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6479 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6480 /* is->once.running_thread is cleared by vm_once_clear() */
6481 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6482 return val;
6483 }
6484 else if (is->once.running_thread == th) {
6485 /* recursive once */
6486 return vm_once_exec((VALUE)iseq);
6487 }
6488 else {
6489 /* waiting for finish */
6490 RUBY_VM_CHECK_INTS(ec);
6491 rb_thread_schedule();
6492 goto again;
6493 }
6494}
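
/* [added commentary] This backs the `once` instruction, e.g. the /o regexp
 * flag (`prefix` below is illustrative):
 *   def match?(s) = s =~ /#{Regexp.quote(prefix)}/o
 * The interpolation body runs exactly once; concurrent callers loop on
 * RUBY_VM_CHECK_INTS() and rb_thread_schedule() until the first thread
 * publishes is->once.value and marks it RUNNING_THREAD_ONCE_DONE. */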
6495
6496static OFFSET
6497vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6498{
6499 switch (OBJ_BUILTIN_TYPE(key)) {
6500 case -1:
6501 case T_FLOAT:
6502 case T_SYMBOL:
6503 case T_BIGNUM:
6504 case T_STRING:
6505 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6506 SYMBOL_REDEFINED_OP_FLAG |
6507 INTEGER_REDEFINED_OP_FLAG |
6508 FLOAT_REDEFINED_OP_FLAG |
6509 NIL_REDEFINED_OP_FLAG |
6510 TRUE_REDEFINED_OP_FLAG |
6511 FALSE_REDEFINED_OP_FLAG |
6512 STRING_REDEFINED_OP_FLAG)) {
6513 st_data_t val;
6514 if (RB_FLOAT_TYPE_P(key)) {
6515 double kval = RFLOAT_VALUE(key);
6516 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6517 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6518 }
6519 }
6520 if (rb_hash_stlike_lookup(hash, key, &val)) {
6521 return FIX2LONG((VALUE)val);
6522 }
6523 else {
6524 return else_offset;
6525 }
6526 }
6527 }
6528 return 0;
6529}
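
/* [added commentary] Worked example of the Float normalization above: in
 *   case 1.0; when 1 then :hit end
 * 1.0 has no fractional part, so the key folds to LONG2FIX(1) and hits the
 * same CDHASH slot as the literal 1. Keys like 1.5 or Float::INFINITY are
 * left alone; a miss returns else_offset, and returning 0 means "no table
 * dispatch, interpret each `when` test in order". */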
6530
6531NORETURN(static void
6532 vm_stack_consistency_error(const rb_execution_context_t *ec,
6533 const rb_control_frame_t *,
6534 const VALUE *));
6535static void
6536vm_stack_consistency_error(const rb_execution_context_t *ec,
6537 const rb_control_frame_t *cfp,
6538 const VALUE *bp)
6539{
6540 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6541 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6542 static const char stack_consistency_error[] =
6543 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6544#if defined RUBY_DEVEL
6545 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6546 rb_str_cat_cstr(mesg, "\n");
6547 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6548 rb_bug("%"PRIsVALUE, mesg);
6549#else
6550 rb_bug(stack_consistency_error, nsp, nbp);
6551#endif
6552}
6553
6554static VALUE
6555vm_opt_plus(VALUE recv, VALUE obj)
6556{
6557 if (FIXNUM_2_P(recv, obj) &&
6558 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6559 return rb_fix_plus_fix(recv, obj);
6560 }
6561 else if (FLONUM_2_P(recv, obj) &&
6562 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6563 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6564 }
6565 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6566 return Qundef;
6567 }
6568 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6569 RBASIC_CLASS(obj) == rb_cFloat &&
6570 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6571 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6572 }
6573 else if (RBASIC_CLASS(recv) == rb_cString &&
6574 RBASIC_CLASS(obj) == rb_cString &&
6575 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6576 return rb_str_opt_plus(recv, obj);
6577 }
6578 else if (RBASIC_CLASS(recv) == rb_cArray &&
6579 RBASIC_CLASS(obj) == rb_cArray &&
6580 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6581 return rb_ary_plus(recv, obj);
6582 }
6583 else {
6584 return Qundef;
6585 }
6586}
6587
6588static VALUE
6589vm_opt_minus(VALUE recv, VALUE obj)
6590{
6591 if (FIXNUM_2_P(recv, obj) &&
6592 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6593 return rb_fix_minus_fix(recv, obj);
6594 }
6595 else if (FLONUM_2_P(recv, obj) &&
6596 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6597 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6598 }
6599 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6600 return Qundef;
6601 }
6602 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6603 RBASIC_CLASS(obj) == rb_cFloat &&
6604 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6605 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6606 }
6607 else {
6608 return Qundef;
6609 }
6610}
6611
6612static VALUE
6613vm_opt_mult(VALUE recv, VALUE obj)
6614{
6615 if (FIXNUM_2_P(recv, obj) &&
6616 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6617 return rb_fix_mul_fix(recv, obj);
6618 }
6619 else if (FLONUM_2_P(recv, obj) &&
6620 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6621 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6622 }
6623 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6624 return Qundef;
6625 }
6626 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6627 RBASIC_CLASS(obj) == rb_cFloat &&
6628 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6629 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6630 }
6631 else {
6632 return Qundef;
6633 }
6634}
6635
6636static VALUE
6637vm_opt_div(VALUE recv, VALUE obj)
6638{
6639 if (FIXNUM_2_P(recv, obj) &&
6640 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6641 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6642 }
6643 else if (FLONUM_2_P(recv, obj) &&
6644 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6645 return rb_flo_div_flo(recv, obj);
6646 }
6647 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6648 return Qundef;
6649 }
6650 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6651 RBASIC_CLASS(obj) == rb_cFloat &&
6652 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6653 return rb_flo_div_flo(recv, obj);
6654 }
6655 else {
6656 return Qundef;
6657 }
6658}
6659
6660static VALUE
6661vm_opt_mod(VALUE recv, VALUE obj)
6662{
6663 if (FIXNUM_2_P(recv, obj) &&
6664 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6665 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6666 }
6667 else if (FLONUM_2_P(recv, obj) &&
6668 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6669 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6670 }
6671 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6672 return Qundef;
6673 }
6674 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6675 RBASIC_CLASS(obj) == rb_cFloat &&
6676 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6677 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6678 }
6679 else {
6680 return Qundef;
6681 }
6682}
6683
6684static VALUE
6685vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6686{
6687 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6688 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6689
6690 if (!UNDEF_P(val)) {
6691 return RBOOL(!RTEST(val));
6692 }
6693 }
6694
6695 return Qundef;
6696}
6697
6698static VALUE
6699vm_opt_lt(VALUE recv, VALUE obj)
6700{
6701 if (FIXNUM_2_P(recv, obj) &&
6702 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6703 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6704 }
6705 else if (FLONUM_2_P(recv, obj) &&
6706 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6707 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6708 }
6709 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6710 return Qundef;
6711 }
6712 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6713 RBASIC_CLASS(obj) == rb_cFloat &&
6714 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6715 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6716 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6717 }
6718 else {
6719 return Qundef;
6720 }
6721}
6722
6723static VALUE
6724vm_opt_le(VALUE recv, VALUE obj)
6725{
6726 if (FIXNUM_2_P(recv, obj) &&
6727 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6728 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6729 }
6730 else if (FLONUM_2_P(recv, obj) &&
6731 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6732 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6733 }
6734 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6735 return Qundef;
6736 }
6737 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6738 RBASIC_CLASS(obj) == rb_cFloat &&
6739 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6740 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6741 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6742 }
6743 else {
6744 return Qundef;
6745 }
6746}
6747
6748static VALUE
6749vm_opt_gt(VALUE recv, VALUE obj)
6750{
6751 if (FIXNUM_2_P(recv, obj) &&
6752 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6753 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6754 }
6755 else if (FLONUM_2_P(recv, obj) &&
6756 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6757 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6758 }
6759 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6760 return Qundef;
6761 }
6762 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6763 RBASIC_CLASS(obj) == rb_cFloat &&
6764 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6765 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6766 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6767 }
6768 else {
6769 return Qundef;
6770 }
6771}
6772
6773static VALUE
6774vm_opt_ge(VALUE recv, VALUE obj)
6775{
6776 if (FIXNUM_2_P(recv, obj) &&
6777 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6778 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6779 }
6780 else if (FLONUM_2_P(recv, obj) &&
6781 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6782 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6783 }
6784 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6785 return Qundef;
6786 }
6787 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6788 RBASIC_CLASS(obj) == rb_cFloat &&
6789 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6790 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6791 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6792 }
6793 else {
6794 return Qundef;
6795 }
6796}
6797
6798
6799static VALUE
6800vm_opt_ltlt(VALUE recv, VALUE obj)
6801{
6802 if (SPECIAL_CONST_P(recv)) {
6803 return Qundef;
6804 }
6805 else if (RBASIC_CLASS(recv) == rb_cString &&
6806 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6807 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6808 return rb_str_buf_append(recv, obj);
6809 }
6810 else {
6811 return rb_str_concat(recv, obj);
6812 }
6813 }
6814 else if (RBASIC_CLASS(recv) == rb_cArray &&
6815 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6816 return rb_ary_push(recv, obj);
6817 }
6818 else {
6819 return Qundef;
6820 }
6821}
6822
6823static VALUE
6824vm_opt_and(VALUE recv, VALUE obj)
6825{
6826 // If recv and obj are both fixnums, then the bottom tag bit
6827 // will be 1 on both. 1 & 1 == 1, so the result value will also
6828 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6829 // will be 0, and we return Qundef.
6830 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6831
6832 if (FIXNUM_P(ret) &&
6833 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6834 return ret;
6835 }
6836 else {
6837 return Qundef;
6838 }
6839}
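
/* [added commentary] Worked example of the tag-bit trick above, with
 * Fixnums encoded as (n << 1) | 1:
 *   INT2FIX(6) & INT2FIX(3) == 0b1101 & 0b0111 == 0b0101 == INT2FIX(2)
 * and 6 & 3 == 2, so ANDing the representations is simultaneously the
 * answer and the proof that both operands carried the Fixnum tag bit. */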
6840
6841static VALUE
6842vm_opt_or(VALUE recv, VALUE obj)
6843{
6844 if (FIXNUM_2_P(recv, obj) &&
6845 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6846 return recv | obj;
6847 }
6848 else {
6849 return Qundef;
6850 }
6851}
6852
6853static VALUE
6854vm_opt_aref(VALUE recv, VALUE obj)
6855{
6856 if (SPECIAL_CONST_P(recv)) {
6857 if (FIXNUM_2_P(recv, obj) &&
6858 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6859 return rb_fix_aref(recv, obj);
6860 }
6861 return Qundef;
6862 }
6863 else if (RBASIC_CLASS(recv) == rb_cArray &&
6864 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6865 if (FIXNUM_P(obj)) {
6866 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6867 }
6868 else {
6869 return rb_ary_aref1(recv, obj);
6870 }
6871 }
6872 else if (RBASIC_CLASS(recv) == rb_cHash &&
6873 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6874 return rb_hash_aref(recv, obj);
6875 }
6876 else {
6877 return Qundef;
6878 }
6879}
6880
6881static VALUE
6882vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6883{
6884 if (SPECIAL_CONST_P(recv)) {
6885 return Qundef;
6886 }
6887 else if (RBASIC_CLASS(recv) == rb_cArray &&
6888 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6889 FIXNUM_P(obj)) {
6890 rb_ary_store(recv, FIX2LONG(obj), set);
6891 return set;
6892 }
6893 else if (RBASIC_CLASS(recv) == rb_cHash &&
6894 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6895 rb_hash_aset(recv, obj, set);
6896 return set;
6897 }
6898 else {
6899 return Qundef;
6900 }
6901}
6902
6903static VALUE
6904vm_opt_aref_with(VALUE recv, VALUE key)
6905{
6906 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6907 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6908 rb_hash_compare_by_id_p(recv) == Qfalse &&
6909 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6910 return rb_hash_aref(recv, key);
6911 }
6912 else {
6913 return Qundef;
6914 }
6915}
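
/* [added commentary] opt_aref_with serves `hash["literal"]`, passing the
 * deduplicated frozen key so no key string is allocated per lookup.
 * compare_by_identity hashes and hashes with a default proc bail out to the
 * generic path, where the shared frozen key could otherwise become
 * observable from user code. */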
6916
6917VALUE
6918rb_vm_opt_aref_with(VALUE recv, VALUE key)
6919{
6920 return vm_opt_aref_with(recv, key);
6921}
6922
6923static VALUE
6924vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6925{
6926 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6927 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6928 rb_hash_compare_by_id_p(recv) == Qfalse) {
6929 return rb_hash_aset(recv, key, val);
6930 }
6931 else {
6932 return Qundef;
6933 }
6934}
6935
6936VALUE
6937rb_vm_opt_aset_with(VALUE recv, VALUE key, VALUE value)
6938{
6939 return vm_opt_aset_with(recv, key, value);
6940}
6941
6942static VALUE
6943vm_opt_length(VALUE recv, int bop)
6944{
6945 if (SPECIAL_CONST_P(recv)) {
6946 return Qundef;
6947 }
6948 else if (RBASIC_CLASS(recv) == rb_cString &&
6949 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6950 if (bop == BOP_EMPTY_P) {
6951 return LONG2NUM(RSTRING_LEN(recv));
6952 }
6953 else {
6954 return rb_str_length(recv);
6955 }
6956 }
6957 else if (RBASIC_CLASS(recv) == rb_cArray &&
6958 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6959 return LONG2NUM(RARRAY_LEN(recv));
6960 }
6961 else if (RBASIC_CLASS(recv) == rb_cHash &&
6962 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6963 return INT2FIX(RHASH_SIZE(recv));
6964 }
6965 else {
6966 return Qundef;
6967 }
6968}
6969
6970static VALUE
6971vm_opt_empty_p(VALUE recv)
6972{
6973 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6974 case Qundef: return Qundef;
6975 case INT2FIX(0): return Qtrue;
6976 default: return Qfalse;
6977 }
6978}
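
/* [added commentary] Note on the BOP_EMPTY_P branch of vm_opt_length():
 * only zero-ness matters for emptiness, so the raw byte length stands in
 * for the character-aware rb_str_length(), and vm_opt_empty_p() just
 * compares the result against INT2FIX(0). */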
6979
6980VALUE rb_false(VALUE obj);
6981
6982static VALUE
6983vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6984{
6985 if (NIL_P(recv) &&
6986 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6987 return Qtrue;
6988 }
6989 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6990 return Qfalse;
6991 }
6992 else {
6993 return Qundef;
6994 }
6995}
6996
6997static VALUE
6998fix_succ(VALUE x)
6999{
7000 switch (x) {
7001 case ~0UL:
7002 /* 0xFFFF_FFFF == INT2FIX(-1)
7003 * `-1.succ` is of course 0. */
7004 return INT2FIX(0);
7005 case RSHIFT(~0UL, 1):
7006 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7007 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7008 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7009 default:
7010 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7011 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7012 * == lx*2 + ly*2 + 1
7013 * == (lx*2+1) + (ly*2+1) - 1
7014 * == x + y - 1
7015 *
7016 * Here, if we put y := INT2FIX(1):
7017 *
7018 * == x + INT2FIX(1) - 1
7019 * == x + 2 .
7020 */
7021 return x + 2;
7022 }
7023}
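
/* [added commentary] Quick check of the identity above: INT2FIX(5) == 11
 * and 11 + 2 == 13 == INT2FIX(6), so adding 2 to the tagged word increments
 * the boxed integer by exactly 1 with no untagging. */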
7024
7025static VALUE
7026vm_opt_succ(VALUE recv)
7027{
7028 if (FIXNUM_P(recv) &&
7029 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7030 return fix_succ(recv);
7031 }
7032 else if (SPECIAL_CONST_P(recv)) {
7033 return Qundef;
7034 }
7035 else if (RBASIC_CLASS(recv) == rb_cString &&
7036 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7037 return rb_str_succ(recv);
7038 }
7039 else {
7040 return Qundef;
7041 }
7042}
7043
7044static VALUE
7045vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7046{
7047 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7048 return RBOOL(!RTEST(recv));
7049 }
7050 else {
7051 return Qundef;
7052 }
7053}
7054
7055static VALUE
7056vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7057{
7058 if (SPECIAL_CONST_P(recv)) {
7059 return Qundef;
7060 }
7061 else if (RBASIC_CLASS(recv) == rb_cString &&
7062 CLASS_OF(obj) == rb_cRegexp &&
7063 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7064 return rb_reg_match(obj, recv);
7065 }
7066 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7067 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7068 return rb_reg_match(recv, obj);
7069 }
7070 else {
7071 return Qundef;
7072 }
7073}
7074
7075rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7076
7077NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7078
7079static inline void
7080vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7081 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7082 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7083{
7084 rb_event_flag_t event = pc_events & target_event;
7085 VALUE self = GET_SELF();
7086
7087 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7088
7089 if (event & global_hooks->events) {
7090 /* increment PC because source line is calculated with PC-1 */
7091 reg_cfp->pc++;
7092 vm_dtrace(event, ec);
7093 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7094 reg_cfp->pc--;
7095 }
7096
7097 // Load here, since the global hook above can add and free local hooks
7098 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7099 if (local_hooks != NULL) {
7100 if (event & local_hooks->events) {
7101 /* increment PC because source line is calculated with PC-1 */
7102 reg_cfp->pc++;
7103 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7104 reg_cfp->pc--;
7105 }
7106 }
7107}
7108
7109#define VM_TRACE_HOOK(target_event, val) do { \
7110 if ((pc_events & (target_event)) & enabled_flags) { \
7111 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7112 } \
7113} while (0)
7114
7115static VALUE
7116rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7117{
7118 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7119 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7120 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7121}
7122
7123static void
7124vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7125{
7126 const VALUE *pc = reg_cfp->pc;
7127 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7128 rb_event_flag_t global_events = enabled_flags;
7129
7130 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7131 return;
7132 }
7133 else {
7134 const rb_iseq_t *iseq = reg_cfp->iseq;
7135 VALUE iseq_val = (VALUE)iseq;
7136 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7137 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7138 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7139 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7140 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7141 rb_hook_list_t *bmethod_local_hooks = NULL;
7142 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7143 rb_event_flag_t bmethod_local_events = 0;
7144 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7145 enabled_flags |= iseq_local_events;
7146
7147 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7148
7149 if (bmethod_frame) {
7150 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7151 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7152 bmethod_local_hooks = me->def->body.bmethod.hooks;
7153 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7154 if (bmethod_local_hooks) {
7155 bmethod_local_events = bmethod_local_hooks->events;
7156 }
7157 }
7158
7159
7160 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7161#if 0
7162 /* disable trace */
7163 /* TODO: incomplete */
7164 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7165#else
7166 /* do not disable trace because of the performance problem
7167 * (re-enabling overhead)
7168 */
7169#endif
7170 return;
7171 }
7172 else if (ec->trace_arg != NULL) {
7173 /* already tracing */
7174 return;
7175 }
7176 else {
7177 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7178 /* Note, not considering iseq local events here since the same
7179 * iseq could be used in multiple bmethods. */
7180 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7181
7182 if (0) {
7183 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7184 (int)pos,
7185 (int)pc_events,
7186 RSTRING_PTR(rb_iseq_path(iseq)),
7187 (int)rb_iseq_line_no(iseq, pos),
7188 RSTRING_PTR(rb_iseq_label(iseq)));
7189 }
7190 VM_ASSERT(reg_cfp->pc == pc);
7191 VM_ASSERT(pc_events != 0);
7192
7193 /* check traces */
7194 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7195 /* b_call instruction running as a method. Fire call event. */
7196 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7197 }
7198 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7199 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7200 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7201 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7202 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7203 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7204 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7205 /* b_return instruction running as a method. Fire return event. */
7206 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7207 }
7208
7209 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7210 // We need the pointer to stay valid in case compaction happens in a trace hook.
7211 //
7212 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7213 // storage for `rb_method_definition_t` is not on the GC heap.
7214 RB_GC_GUARD(iseq_val);
7215 }
7216 }
7217}
7218#undef VM_TRACE_HOOK
7219
7220#if VM_CHECK_MODE > 0
7221NORETURN( NOINLINE( COLDFUNC
7222void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7223
7224void
7225Init_vm_stack_canary(void)
7226{
7227 /* This has to be called _after_ our PRNG is properly set up. */
7228 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7229 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7230
7231 vm_stack_canary_was_born = true;
7232 VM_ASSERT(n == 0);
7233}
7234
7235void
7236rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7237{
7238 /* Because a method has already been called, why not call
7239 * another one. */
7240 const char *insn = rb_insns_name(i);
7241 VALUE inspection = rb_inspect(c);
7242 const char *str = StringValueCStr(inspection);
7243
7244 rb_bug("dead canary found at %s: %s", insn, str);
7245}
7246
7247#else
7248void Init_vm_stack_canary(void) { /* nothing to do */ }
7249#endif
7250
7251
7252/* a part of the following code is generated by this ruby script:
7253
725416.times{|i|
7255 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7256 typedef_args.prepend(", ") if i != 0
7257 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7258 call_args.prepend(", ") if i != 0
7259 puts %Q{
7260static VALUE
7261builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7262{
7263 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7264 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7265}}
7266}
7267
7268puts
7269puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
727016.times{|i|
7271 puts " builtin_invoker#{i},"
7272}
7273puts "};"
7274*/
7275
7276static VALUE
7277builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7278{
7279 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7280 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7281}
7282
7283static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

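/* Each builtin_invokerN above casts the generic funcptr to a typedef with
 * the builtin's exact arity and unpacks argv into positional arguments, so
 * the builtin is called through a correctly-typed function pointer.
 * Illustrative sketch (my_builtin is hypothetical, not defined in CRuby):
 *
 *     static VALUE
 *     my_builtin(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b)
 *     {
 *         return rb_funcall(a, rb_intern("+"), 1, b);
 *     }
 *
 * Registered with argc == 2, it would be reached as
 *
 *     builtin_invoker2(ec, self, argv, (rb_insn_func_t)my_builtin);
 *
 * which expands to my_builtin(ec, self, argv[0], argv[1]). */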
typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

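/* Maps a builtin's fixed arity to the matching invoker. Note there is no
 * bounds check here: argc is assumed to be in 0..15, a limit presumably
 * enforced when the builtin is registered. */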
static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}

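/* The canary below guards builtins declared leaf via `Primitive.attr! :leaf`:
 * a leaf builtin must not grow or re-enter the VM stack. Assuming the usual
 * SETUP_CANARY/CHECK_CANARY semantics, a canary value is written at the
 * current stack pointer before the call and verified afterwards, turning a
 * violated :leaf assumption into an immediate failure rather than silent
 * stack corruption. */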
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

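/* vm_invoke_builtin_delegate forwards a contiguous slice of the calling
 * frame's locals as the builtin's argv, with no copying. Locals live at
 * negative offsets from ep, so local i sits at
 * ep - local_table_size - VM_ENV_DATA_SIZE + 1 + i, and the else-branch
 * below is simply &locals[start_index]. Worked example (sizes assumed for
 * illustration): with local_table_size == 3 and VM_ENV_DATA_SIZE == 3,
 * locals 0..2 occupy ep[-5]..ep[-3]; start_index == 1 then yields
 * argv == ep - 4, i.e. the second local. */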
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

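/* Illustrative sketch (the method is hypothetical): for a Ruby-level
 * definition whose builtin call passes a run of consecutive locals
 * unchanged, e.g.
 *
 *     def pack(fmt) = Primitive.pack_pack(fmt)
 *
 * the compiler can emit an invokebuiltin_delegate instruction instead of
 * pushing the arguments again, and the function above resolves argv
 * directly from the already-stored locals. */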
// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}
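/* Inline C fragments compiled from __builtin_inline!() read the caller's
 * Ruby locals through this accessor; index is an ep-relative offset chosen
 * by the generated code (locals live at negative offsets from ep, per the
 * layout sketched above). */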