Ruby 3.5.0dev (2025-04-03 revision 1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
vm_insnhelper.c (1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
void
rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
#ifdef USE_SIGALTSTACK
    ec_stack_overflow(ec, TRUE);
#else
    ec_stack_overflow(ec, FALSE);
#endif
}

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
            );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}
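
/* A worked example (insn widths assumed for illustration: `putobject`
 * = 2 words, `leave` = 1 word): if iseq_encoded is [putobject, obj,
 * leave] and pc points at offset 3 (just past `leave`), the scan
 * visits pos 0 (next_pos 2), then pos 2 (next_pos 3 == pc), and
 * returns 2 -- the index of the instruction currently executing. */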

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent an infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting content this large.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
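
/* A sketch of the intended use (inferred from the comment above): a
 * caller that wants to verify an instruction is leaf can store this
 * poison value just above the stack top before running it, then check
 * that the slot is untouched afterwards; rb_vm_check_canary() reports
 * which instruction clobbered the canary. */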

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
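
/* After the stores above, the region owned by the new frame looks like
 * this (with sp == cfp->sp and ep == sp - 1):
 *
 *   sp[-local_size-3] .. sp[-4] : local variables, initialized to Qnil
 *   sp[-3] : cref_or_me   == ep[VM_ENV_DATA_INDEX_ME_CREF]  (ep[-2])
 *   sp[-2] : specval      == ep[VM_ENV_DATA_INDEX_SPECVAL]  (ep[-1])
 *   sp[-1] : type (flags) == ep[VM_ENV_DATA_INDEX_FLAGS]    (ep[-0])
 */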

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}
// Pushes a dummy pseudo frame whose file name is fname.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
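
/* Examples of the resulting message, for argc == 1:
 *   min == 2, max == 2                   -> "... (given 1, expected 2)"
 *   min == 2, max == UNLIMITED_ARGUMENTS -> "... (given 1, expected 2+)"
 *   min == 2, max == 3                   -> "... (given 1, expected 2..3)"
 */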

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember the env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}
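
/* Note: the fast path above writes without a write barrier, which is
 * valid only while VM_ENV_FLAG_WB_REQUIRED is unset. The slow path
 * instead registers the whole env with the GC via
 * rb_gc_writebarrier_remember() and then clears the flag, so later
 * writes to this env can use the fast path again. */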

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}

/* svar */
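
/* An svar holds frame-local special variables: the last input line
 * ($_, VM_SVAR_LASTLINE), the last regexp match ($~, VM_SVAR_BACKREF),
 * and extra slots (keys >= VM_SVAR_EXTRA_START) kept in the `others`
 * array. */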

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
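
/* The `type` operand encodes which special variable is wanted: when
 * the low bit is set, (type >> 1) is a character key ('&' for $&, '`'
 * for $`, '\'' for $', '+' for $+); otherwise (type >> 1) is n for the
 * numbered group $n. A type of 0 reads an svar slot selected by `key`
 * instead. */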

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_replace_with_duplicated_cref: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_EXT(CREF_CLASS(cref))->cloned) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

void
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
{
    rb_cref_t *new_cref;

    while (cref) {
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            *new_cref_ptr = new_cref;
            return;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        *new_cref_ptr = new_cref;
        new_cref_ptr = &new_cref->next;
    }
    *new_cref_ptr = NULL;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
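
/* A summary of the lookup order implemented above for an unqualified
 * constant (orig_klass nil, allow_nil true):
 *   1. scan the lexically enclosing scopes (the cref chain), innermost
 *      first, checking each class/module itself and honoring autoload;
 *   2. fall back to the innermost non-eval cref class (or the class of
 *      self) and its ancestors via rb_const_get()/rb_const_defined().
 * Qualified references (`Expr::FOO`) skip the lexical scan and go
 * straight to rb_public_const_get_from(orig_klass, id). */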

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
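
/* `segments` is a 0-terminated ID list for a constant path: e.g. a
 * reference like Foo::Bar::Baz is walked as {<Foo>, <Bar>, <Baz>, 0},
 * and an absolute path (::Foo::Bar) is marked by a leading idNULL so
 * the walk starts at rb_cObject with allow_nil disabled. */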


static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

#define ATTR_INDEX_NOT_SET (attr_index_t)-1

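/* vm_getivar() below implements the shape-based instance variable
 * cache: the inline cache (ic, for getinstancevariable) or the call
 * cache (cc, for attr_reader) stores a (shape_id, attr_index) pair.
 * When the receiver's current shape_id matches the cached one, its
 * ivar layout is known and the value is read directly from
 * ivar_list[index] with no hash lookup. A mismatch falls back to the
 * shape-tree search (or an st_table lookup for "too complex" objects)
 * and refills the cache. */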
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
#if OPT_IC_FOR_IVAR
    VALUE val = Qundef;
    shape_id_t shape_id;
    VALUE * ivar_list;

    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

#if SHAPE_IN_BASIC_FLAGS
    shape_id = RBASIC_SHAPE_ID(obj);
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_IVPTR(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);

#if !SHAPE_IN_BASIC_FLAGS
        shape_id = ROBJECT_SHAPE_ID(obj);
#endif
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            ivar_list = RCLASS_IVPTR(obj);

#if !SHAPE_IN_BASIC_FLAGS
            shape_id = RCLASS_SHAPE_ID(obj);
#endif

            break;
        }
      default:
        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
            shape_id = ivtbl->shape_id;
#endif
            ivar_list = ivtbl->as.shape.ivptr;
        }
        else {
            return default_value;
        }
    }

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
            st_table *table = NULL;
            switch (BUILTIN_TYPE(obj)) {
              case T_CLASS:
              case T_MODULE:
                table = (st_table *)RCLASS_IVPTR(obj);
                break;

              case T_OBJECT:
                table = ROBJECT_IV_HASH(obj);
                break;

              default: {
                struct gen_ivtbl *ivtbl;
                if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
                    table = ivtbl->as.complex.table;
                }
                break;
              }
            }

            if (!table || !st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }

    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);

        if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if SHAPE_IN_BASIC_FLAGS
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
#else
    shape_id_t shape_id = rb_generic_shape_id(obj);
#endif

    struct gen_ivtbl *ivtbl = 0;

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
        rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);

        if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
            RUBY_ASSERT(index < dest_shape->capacity);
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    rb_gen_ivtbl_get(obj, 0, &ivtbl);

    if (shape_id != dest_shape_id) {
#if SHAPE_IN_BASIC_FLAGS
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
#else
        ivtbl->shape_id = dest_shape_id;
#endif
    }

    RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
                rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
                shape_id_t source_shape_id = dest_shape->parent_id;

                if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
                    RUBY_ASSERT(index < dest_shape->capacity);
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_IVPTR(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
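
/* A note on the set-side cache checked above: dest_shape_id plays a
 * double role. If it equals the receiver's current shape, the write is
 * a plain overwrite of an existing ivar. If it instead names a child
 * shape whose parent_id is the current shape and whose edge_name is
 * `id` (with unchanged capacity), the write appends the ivar and
 * performs the cached shape transition. Anything else returns Qundef
 * and defers to vm_setivar_default() or the slow paths. */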

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

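/* For illustration: an "orphan" break is one whose defining block can
 * no longer return to its home frame, e.g.
 *
 *     def make; proc { break :x }; end
 *     make.call  #=> LocalJumpError, "break from proc-closure"
 *
 * whereas `lambda { break :x }.call` is accepted above by converting
 * TAG_BREAK into TAG_RETURN for the lambda's own frame. */
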
static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
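
/* vm_expandarray() backs the `expandarray num, flag` instruction used
 * by multiple assignment: flag & 0x01 means a trailing splat collects
 * the rest (e.g. `a, b, *c = ary` expands with num == 2 and the splat
 * bit set), flag & 0x02 selects the "post" layout (`*a, b, c = ary`),
 * and missing elements are padded with nil. */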
1989
1990static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1991
1992static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1993
1994static struct rb_class_cc_entries *
1995vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1996{
1997 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1998#if VM_CHECK_MODE > 0
1999 ccs->debug_sig = ~(VALUE)ccs;
2000#endif
2001 ccs->capa = 0;
2002 ccs->len = 0;
2003 ccs->cme = cme;
2004 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2005 ccs->entries = NULL;
2006
2007 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2008 RB_OBJ_WRITTEN(klass, Qundef, cme);
2009 return ccs;
2010}
2011
2012static void
2013vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2014{
2015 if (! vm_cc_markable(cc)) {
2016 return;
2017 }
2018
2019 if (UNLIKELY(ccs->len == ccs->capa)) {
2020 if (ccs->capa == 0) {
2021 ccs->capa = 1;
2022 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2023 }
2024 else {
2025 ccs->capa *= 2;
2026 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2027 }
2028 }
2029 VM_ASSERT(ccs->len < ccs->capa);
2030
2031 const int pos = ccs->len++;
2032 ccs->entries[pos].argc = vm_ci_argc(ci);
2033 ccs->entries[pos].flag = vm_ci_flag(ci);
2034 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2035
2036 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2037 // for tuning
2038 // vm_mtbl_dump(klass, 0);
2039 }
2040}
2041
2042#if VM_CHECK_MODE > 0
2043void
2044rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2045{
2046 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2047 for (int i=0; i<ccs->len; i++) {
2048 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2049 ccs->entries[i].flag,
2050 ccs->entries[i].argc);
2051 rp(ccs->entries[i].cc);
2052 }
2053}
2054
2055static int
2056vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2057{
2058 VM_ASSERT(vm_ccs_p(ccs));
2059 VM_ASSERT(ccs->len <= ccs->capa);
2060
2061 for (int i=0; i<ccs->len; i++) {
2062 const struct rb_callcache *cc = ccs->entries[i].cc;
2063
2064 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2065 VM_ASSERT(vm_cc_class_check(cc, klass));
2066 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2067 VM_ASSERT(!vm_cc_super_p(cc));
2068 VM_ASSERT(!vm_cc_refinement_p(cc));
2069 }
2070 return TRUE;
2071}
2072#endif
2073
2074const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2075
2076static const struct rb_callcache *
2077vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2078{
2079 const ID mid = vm_ci_mid(ci);
2080 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2081 struct rb_class_cc_entries *ccs = NULL;
2082 VALUE ccs_data;
2083
2084 if (cc_tbl) {
2085 // CCS data is keyed on method id, so we don't need the method id
2086 // for doing comparisons in the `for` loop below.
2087 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2088 ccs = (struct rb_class_cc_entries *)ccs_data;
2089 const int ccs_len = ccs->len;
2090
2091 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2092 rb_vm_ccs_free(ccs);
2093 rb_id_table_delete(cc_tbl, mid);
2094 ccs = NULL;
2095 }
2096 else {
2097 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2098
2099 // We already know the method id is correct because we had
2100 // to look up the ccs_data by method id. All we need to
2101 // compare is argc and flag
2102 unsigned int argc = vm_ci_argc(ci);
2103 unsigned int flag = vm_ci_flag(ci);
2104
2105 for (int i=0; i<ccs_len; i++) {
2106 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2107 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2108 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2109
2110 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2111
2112 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2113 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2114
2115 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2116 VM_ASSERT(ccs_cc->klass == klass);
2117 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2118
2119 return ccs_cc;
2120 }
2121 }
2122 }
2123 }
2124 }
2125 else {
2126 cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
2127 }
2128
2129 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2130
2131 const rb_callable_method_entry_t *cme;
2132
2133 if (ccs) {
2134 cme = ccs->cme;
2135 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2136
2137 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2138 }
2139 else {
2140 cme = rb_callable_method_entry(klass, mid);
2141 }
2142
2143 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2144
2145 if (cme == NULL) {
2146 // undef or not found: can't cache the information
2147 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2148 return &vm_empty_cc;
2149 }
2150
2151 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2152
2153 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2154
2155 if (ccs == NULL) {
2156 VM_ASSERT(cc_tbl != NULL);
2157
2158 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2159 // rb_callable_method_entry() prepares ccs.
2160 ccs = (struct rb_class_cc_entries *)ccs_data;
2161 }
2162 else {
2163 // TODO: required?
2164 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2165 }
2166 }
2167
2168 cme = rb_check_overloaded_cme(cme, ci);
2169
2170 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2171 vm_ccs_push(klass, ccs, ci, cc);
2172
2173 VM_ASSERT(vm_cc_cme(cc) != NULL);
2174 VM_ASSERT(cme->called_id == mid);
2175 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2176
2177 return cc;
2178}
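/* A rough summary of vm_search_cc() above: (1) look up the class's cc
 * table (RCLASS_CC_TBL) by method id, discarding the entry wholesale if
 * its cached cme has been invalidated; (2) scan the remaining ccs entries
 * for one whose (argc, flag) pair matches this call site and return it on
 * a hit; (3) otherwise resolve the method entry, wrap it in a fresh
 * callcache, push that onto the ccs, and return it, or vm_empty_cc when
 * the method does not exist at all.
 */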
2179
2180const struct rb_callcache *
2181rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2182{
2183 const struct rb_callcache *cc;
2184
2185 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2186
2187 RB_VM_LOCK_ENTER();
2188 {
2189 cc = vm_search_cc(klass, ci);
2190
2191 VM_ASSERT(cc);
2192 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2193 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2194 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2195 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2196 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2197 }
2198 RB_VM_LOCK_LEAVE();
2199
2200 return cc;
2201}
2202
2203static const struct rb_callcache *
2204vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2205{
2206#if USE_DEBUG_COUNTER
2207 const struct rb_callcache *old_cc = cd->cc;
2208#endif
2209
2210 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2211
2212#if OPT_INLINE_METHOD_CACHE
2213 cd->cc = cc;
2214
2215 const struct rb_callcache *empty_cc = &vm_empty_cc;
2216 if (cd_owner && cc != empty_cc) {
2217 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2218 }
2219
2220#if USE_DEBUG_COUNTER
2221 if (!old_cc || old_cc == empty_cc) {
2222 // empty
2223 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2224 }
2225 else if (old_cc == cc) {
2226 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2227 }
2228 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2229 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2230 }
2231 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2232 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2233 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2234 }
2235 else {
2236 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2237 }
2238#endif
2239#endif // OPT_INLINE_METHOD_CACHE
2240
2241 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2242 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2243
2244 return cc;
2245}
2246
2247ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2248static const struct rb_callcache *
2249vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2250{
2251 const struct rb_callcache *cc = cd->cc;
2252
2253#if OPT_INLINE_METHOD_CACHE
2254 if (LIKELY(vm_cc_class_check(cc, klass))) {
2255 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2256 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2257 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2258 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2259 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2260 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2261
2262 return cc;
2263 }
2264 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2265 }
2266 else {
2267 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2268 }
2269#endif
2270
2271 return vm_search_method_slowpath0(cd_owner, cd, klass);
2272}
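/* vm_search_method_fastpath() is the first, per-call-site cache level:
 * cd->cc is accepted only when the cached class matches the receiver's
 * class and the cached method entry has not been invalidated. Any other
 * outcome falls back to the locked, global search in
 * vm_search_method_slowpath0().
 */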
2273
2274static const struct rb_callcache *
2275vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2276{
2277 VALUE klass = CLASS_OF(recv);
2278 VM_ASSERT(klass != Qfalse);
2279 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2280
2281 return vm_search_method_fastpath(cd_owner, cd, klass);
2282}
2283
2284#if __has_attribute(transparent_union)
2285typedef union {
2286 VALUE (*anyargs)(ANYARGS);
2287 VALUE (*f00)(VALUE);
2288 VALUE (*f01)(VALUE, VALUE);
2289 VALUE (*f02)(VALUE, VALUE, VALUE);
2290 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2291 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2292 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2293 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2294 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2295 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2296 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2297 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2298 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2299 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2304} __attribute__((__transparent_union__)) cfunc_type;
2305# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2306#else
2307typedef VALUE (*cfunc_type)(ANYARGS);
2308# define make_cfunc_type(f) (cfunc_type)(f)
2309#endif
2310
2311static inline int
2312check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2313{
2314 if (! me) {
2315 return false;
2316 }
2317 else {
2318 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2319 VM_ASSERT(callable_method_entry_p(me));
2320 VM_ASSERT(me->def);
2321 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2322 return false;
2323 }
2324 else {
2325#if __has_attribute(transparent_union)
2326 return me->def->body.cfunc.func == func.anyargs;
2327#else
2328 return me->def->body.cfunc.func == func;
2329#endif
2330 }
2331 }
2332}
2333
2334static inline int
2335check_method_basic_definition(const rb_callable_method_entry_t *me)
2336{
2337 return me && METHOD_ENTRY_BASIC(me);
2338}
2339
2340static inline int
2341vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2342{
2343 VM_ASSERT(iseq != NULL);
2344 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2345 return check_cfunc(vm_cc_cme(cc), func);
2346}
2347
2348#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2349#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2350
2351#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2352
2353static inline bool
2354FIXNUM_2_P(VALUE a, VALUE b)
2355{
2356 /* FIXNUM_P(a) && FIXNUM_P(b)
2357 * == ((a & 1) && (b & 1))
2358 * == a & b & 1 */
2359 SIGNED_VALUE x = a;
2360 SIGNED_VALUE y = b;
2361 SIGNED_VALUE z = x & y & 1;
2362 return z == 1;
2363}
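/* A worked example of the tag trick above. Assuming the usual fixnum
 * encoding (low bit 1; heap pointers are aligned, so their low bit is 0),
 * a minimal standalone sketch, not part of this file:
 *
 *     #include <assert.h>
 *     typedef unsigned long VALUE;
 *     #define LONG2FIX(n) (((VALUE)(n) << 1) | 1)  // local stand-in for the real macro
 *     int main(void) {
 *         VALUE a = LONG2FIX(1);          // 0b011
 *         VALUE b = LONG2FIX(2);          // 0b101
 *         VALUE heap = 0x7f00beef00;      // aligned pointer: low bit 0
 *         assert((a & b & 1) == 1);       // both fixnums: fast path taken
 *         assert((a & heap & 1) == 0);    // mixed operands: rejected
 *         return 0;
 *     }
 */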
2364
2365static inline bool
2366FLONUM_2_P(VALUE a, VALUE b)
2367{
2368#if USE_FLONUM
2369 /* FLONUM_P(a) && FLONUM_P(b)
2370 * == ((a & 3) == 2) && ((b & 3) == 2)
2371 * == !(((a ^ 2) | (b ^ 2)) & 3)
2372 */
2373 SIGNED_VALUE x = a;
2374 SIGNED_VALUE y = b;
2375 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2376 return !z;
2377#else
2378 return false;
2379#endif
2380}
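/* The flonum variant above uses the same idea for the 0b10 tag: x ^ 2
 * clears the low two bits exactly when (x & 3) == 2, so
 * ((x ^ 2) | (y ^ 2)) & 3 is zero iff both operands carry the flonum tag.
 * Worked example: for x = 0b0110 (flonum-tagged) and y = 0b0101 (fixnum),
 * (x ^ 2) & 3 == 0 but (y ^ 2) & 3 == 3, so the OR is nonzero and the
 * fast path is rejected.
 */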
2381
2382static VALUE
2383opt_equality_specialized(VALUE recv, VALUE obj)
2384{
2385 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2386 goto compare_by_identity;
2387 }
2388 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2389 goto compare_by_identity;
2390 }
2391 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2392 goto compare_by_identity;
2393 }
2394 else if (SPECIAL_CONST_P(recv)) {
2395 // other special constants: no fast path, fall through to Qundef
2396 }
2397 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2398 double a = RFLOAT_VALUE(recv);
2399 double b = RFLOAT_VALUE(obj);
2400
2401#if MSC_VERSION_BEFORE(1300)
2402 if (isnan(a)) {
2403 return Qfalse;
2404 }
2405 else if (isnan(b)) {
2406 return Qfalse;
2407 }
2408 else
2409#endif
2410 return RBOOL(a == b);
2411 }
2412 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2413 if (recv == obj) {
2414 return Qtrue;
2415 }
2416 else if (RB_TYPE_P(obj, T_STRING)) {
2417 return rb_str_eql_internal(obj, recv);
2418 }
2419 }
2420 return Qundef;
2421
2422 compare_by_identity:
2423 return RBOOL(recv == obj);
2424}
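/* Qundef from opt_equality_specialized() is a sentinel meaning "no
 * specialized path applied", not a comparison result. Every shortcut
 * above is guarded by EQ_UNREDEFINED_P, so redefining == on Integer,
 * Float, Symbol or String disables the corresponding fast path and the
 * caller falls back to a normal method dispatch.
 */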
2425
2426static VALUE
2427opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2428{
2429 VM_ASSERT(cd_owner != NULL);
2430
2431 VALUE val = opt_equality_specialized(recv, obj);
2432 if (!UNDEF_P(val)) return val;
2433
2434 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2435 return Qundef;
2436 }
2437 else {
2438 return RBOOL(recv == obj);
2439 }
2440}
2441
2442#undef EQ_UNREDEFINED_P
2443
2444static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2445NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2446
2447static VALUE
2448opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2449{
2450 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2451
2452 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2453 return RBOOL(recv == obj);
2454 }
2455 else {
2456 return Qundef;
2457 }
2458}
2459
2460static VALUE
2461opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2462{
2463 VALUE val = opt_equality_specialized(recv, obj);
2464 if (!UNDEF_P(val)) {
2465 return val;
2466 }
2467 else {
2468 return opt_equality_by_mid_slowpath(recv, obj, mid);
2469 }
2470}
2471
2472VALUE
2473rb_equal_opt(VALUE obj1, VALUE obj2)
2474{
2475 return opt_equality_by_mid(obj1, obj2, idEq);
2476}
2477
2478VALUE
2479rb_eql_opt(VALUE obj1, VALUE obj2)
2480{
2481 return opt_equality_by_mid(obj1, obj2, idEqlP);
2482}
2483
2484extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2485extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2486
2487static VALUE
2488check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2489{
2490 switch (type) {
2491 case VM_CHECKMATCH_TYPE_WHEN:
2492 return pattern;
2493 case VM_CHECKMATCH_TYPE_RESCUE:
2494 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2495 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2496 }
2497 /* fall through */
2498 case VM_CHECKMATCH_TYPE_CASE: {
2499 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2500 }
2501 default:
2502 rb_bug("check_match: unreachable");
2503 }
2504}
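/* The three match types correspond to Ruby semantics roughly as follows:
 * TYPE_WHEN returns the pattern itself, so its truthiness decides the
 * branch; TYPE_CASE and TYPE_RESCUE both dispatch `pattern === target'
 * with refinements honored, with TYPE_RESCUE additionally enforcing that
 * only classes and modules may appear in a rescue clause.
 */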
2505
2506
2507#if MSC_VERSION_BEFORE(1300)
2508#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2509#else
2510#define CHECK_CMP_NAN(a, b) /* do nothing */
2511#endif
2512
2513static inline VALUE
2514double_cmp_lt(double a, double b)
2515{
2516 CHECK_CMP_NAN(a, b);
2517 return RBOOL(a < b);
2518}
2519
2520static inline VALUE
2521double_cmp_le(double a, double b)
2522{
2523 CHECK_CMP_NAN(a, b);
2524 return RBOOL(a <= b);
2525}
2526
2527static inline VALUE
2528double_cmp_gt(double a, double b)
2529{
2530 CHECK_CMP_NAN(a, b);
2531 return RBOOL(a > b);
2532}
2533
2534static inline VALUE
2535double_cmp_ge(double a, double b)
2536{
2537 CHECK_CMP_NAN(a, b);
2538 return RBOOL(a >= b);
2539}
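/* Note that on conforming compilers CHECK_CMP_NAN expands to nothing:
 * C's relational operators already yield false when either operand is
 * NaN. The explicit isnan() guard exists only for old MSVC
 * (MSC_VERSION_BEFORE(1300)), where the plain comparison was presumably
 * unreliable.
 */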
2540
2541// Copied by vm_dump.c
2542static inline VALUE *
2543vm_base_ptr(const rb_control_frame_t *cfp)
2544{
2545 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2546
2547 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2548 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2549
2550 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2551 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2552 int params = ISEQ_BODY(cfp->iseq)->param.size;
2553
2554 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2555 bp += vm_ci_argc(ci);
2556 }
2557
2558 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2559 /* adjust `self' */
2560 bp += 1;
2561 }
2562#if VM_DEBUG_BP_CHECK
2563 if (bp != cfp->bp_check) {
2564 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2565 (long)(cfp->bp_check - GET_EC()->vm_stack),
2566 (long)(bp - GET_EC()->vm_stack));
2567 rb_bug("vm_base_ptr: unreachable");
2568 }
2569#endif
2570 return bp;
2571 }
2572 else {
2573 return NULL;
2574 }
2575}
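/* A sketch of the frame layout vm_base_ptr() assumes, growing upward
 * from the previous frame's sp:
 *
 *     prev_cfp->sp -> [ receiver (method/bmethod frames) ]
 *                     [ locals (local_table_size slots) ]
 *                     [ env data (VM_ENV_DATA_SIZE slots) ]
 *     bp -----------> [ operand stack ... ]
 *
 * hence the +1 adjustment for `self' on method frames, and the extra
 * vm_ci_argc() slots when a forwardable iseq keeps the caller's CI in
 * its last local.
 */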
2576
2577VALUE *
2578rb_vm_base_ptr(const rb_control_frame_t *cfp)
2579{
2580 return vm_base_ptr(cfp);
2581}
2582
2583/* method call processes with call_info */
2584
2585#include "vm_args.c"
2586
2587static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2588ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2589static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2590static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2591static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2592static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2593static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2594
2595static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2596
2597static VALUE
2598vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2599{
2600 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2601
2602 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2603}
2604
2605static VALUE
2606vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2607{
2608 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2609
2610 const struct rb_callcache *cc = calling->cc;
2611 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2612 int param = ISEQ_BODY(iseq)->param.size;
2613 int local = ISEQ_BODY(iseq)->local_table_size;
2614 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2615}
2616
2617bool
2618rb_simple_iseq_p(const rb_iseq_t *iseq)
2619{
2620 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2621 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2622 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2623 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2624 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2627 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2628}
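/* "Simple" here means the iseq accepts nothing but required positional
 * parameters, e.g. `def m(a, b)' in Ruby. Any optional (`a = 1'), rest
 * (`*a'), post-required, keyword (`k:'), keyword-rest (`**kw'), explicit
 * no-keyword (`**nil'), forwardable (`...') or block (`&b') parameter
 * disqualifies the iseq from the fastest setup paths below.
 */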
2629
2630bool
2631rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2632{
2633 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2634 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2635 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2636 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2637 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2638 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2639 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2640 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2641}
2642
2643bool
2644rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2645{
2646 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2647 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2648 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2649 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2650 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2651 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2652 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2653}
2654
2655#define ALLOW_HEAP_ARGV (-2)
2656#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2657
2658static inline bool
2659vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2660{
2661 vm_check_canary(GET_EC(), cfp->sp);
2662 bool ret = false;
2663
2664 if (!NIL_P(ary)) {
2665 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2666 long len = RARRAY_LEN(ary);
2667 int argc = calling->argc;
2668
2669 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2670 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2671 * a temporary array, instead of trying to keep the arguments on the VM stack.
2672 */
2673 VALUE *argv = cfp->sp - argc;
2674 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2675 rb_ary_cat(argv_ary, argv, argc);
2676 rb_ary_cat(argv_ary, ptr, len);
2677 cfp->sp -= argc - 1;
2678 cfp->sp[-1] = argv_ary;
2679 calling->argc = 1;
2680 calling->heap_argv = argv_ary;
2681 RB_GC_GUARD(ary);
2682 }
2683 else {
2684 long i;
2685
2686 if (max_args >= 0 && len + argc > max_args) {
2687 /* If only a given max_args is allowed, copy up to max args.
2688 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2689 * where additional arguments are ignored.
2690 *
2691 * Also, copy up to one more argument than the maximum,
2692 * in case it is an empty keyword hash that will be removed.
2693 */
2694 calling->argc += len - (max_args - argc + 1);
2695 len = max_args - argc + 1;
2696 ret = true;
2697 }
2698 else {
2699 /* Unset heap_argv if it was set originally. This can happen
2700 * when forwarding modified arguments: heap_argv was used for
2701 * the original call, but the forwarded method does not support
2702 * heap_argv in all cases.
2703 */
2704 calling->heap_argv = 0;
2705 }
2706 CHECK_VM_STACK_OVERFLOW(cfp, len);
2707
2708 for (i = 0; i < len; i++) {
2709 *cfp->sp++ = ptr[i];
2710 }
2711 calling->argc += i;
2712 }
2713 }
2714
2715 return ret;
2716}
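/* The boolean result above signals truncation: true means the splatted
 * arguments were clamped to max_args (plus one extra slot for a possible
 * trailing keyword hash), so the caller still has to inspect the last
 * argument; false means everything was copied, or moved to heap_argv.
 */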
2717
2718static inline void
2719vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2720{
2721 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2722 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2723 const VALUE h = rb_hash_new_with_size(kw_len);
2724 VALUE *sp = cfp->sp;
2725 int i;
2726
2727 for (i=0; i<kw_len; i++) {
2728 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2729 }
2730 (sp-kw_len)[0] = h;
2731
2732 cfp->sp -= kw_len - 1;
2733 calling->argc -= kw_len - 1;
2734 calling->kw_splat = 1;
2735}
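/* A before/after sketch of the stack transformation above, for a call
 * like f(x, k1: v1, k2: v2) (kw_len == 2):
 *
 *     before:  [ recv, x, v1, v2 ]            <- sp, argc == 3
 *     after:   [ recv, x, {k1=>v1, k2=>v2} ]  <- sp, argc == 2
 *
 * The kw_len trailing values collapse into one hash, so sp and argc both
 * shrink by kw_len - 1 and the call proceeds in kw_splat style.
 */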
2736
2737static inline VALUE
2738vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2739{
2740 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2741 if (keyword_hash != Qnil) {
2742 /* Convert a non-hash keyword splat to a new hash */
2743 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2744 }
2745 }
2746 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2747 /* Convert a hash keyword splat to a new hash unless
2748 * a mutable keyword splat was passed.
2749 * Skip allocating new hash for empty keyword splat, as empty
2750 * keyword splat will be ignored by both callers.
2751 */
2752 keyword_hash = rb_hash_dup(keyword_hash);
2753 }
2754 return keyword_hash;
2755}
2756
2757static inline void
2758CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2759 struct rb_calling_info *restrict calling,
2760 const struct rb_callinfo *restrict ci, int max_args)
2761{
2762 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2763 if (IS_ARGS_KW_SPLAT(ci)) {
2764 // f(*a, **kw)
2765 VM_ASSERT(calling->kw_splat == 1);
2766
2767 cfp->sp -= 2;
2768 calling->argc -= 2;
2769 VALUE ary = cfp->sp[0];
2770 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2771
2772 // splat a
2773 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2774
2775 // put kw
2776 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2777 if (UNLIKELY(calling->heap_argv)) {
2778 rb_ary_push(calling->heap_argv, kwh);
2779 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2780 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2781 calling->kw_splat = 0;
2782 }
2783 }
2784 else {
2785 cfp->sp[0] = kwh;
2786 cfp->sp++;
2787 calling->argc++;
2788
2789 VM_ASSERT(calling->kw_splat == 1);
2790 }
2791 }
2792 else {
2793 calling->kw_splat = 0;
2794 }
2795 }
2796 else {
2797 // f(*a)
2798 VM_ASSERT(calling->kw_splat == 0);
2799
2800 cfp->sp -= 1;
2801 calling->argc -= 1;
2802 VALUE ary = cfp->sp[0];
2803
2804 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2805 goto check_keyword;
2806 }
2807
2808 // check the last argument
2809 VALUE last_hash, argv_ary;
2810 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2811 if (!IS_ARGS_KEYWORD(ci) &&
2812 RARRAY_LEN(argv_ary) > 0 &&
2813 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2814 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2815
2816 rb_ary_pop(argv_ary);
2817 if (!RHASH_EMPTY_P(last_hash)) {
2818 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2819 calling->kw_splat = 1;
2820 }
2821 }
2822 }
2823 else {
2824check_keyword:
2825 if (!IS_ARGS_KEYWORD(ci) &&
2826 calling->argc > 0 &&
2827 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2828 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2829
2830 if (RHASH_EMPTY_P(last_hash)) {
2831 calling->argc--;
2832 cfp->sp -= 1;
2833 }
2834 else {
2835 cfp->sp[-1] = rb_hash_dup(last_hash);
2836 calling->kw_splat = 1;
2837 }
2838 }
2839 }
2840 }
2841 }
2842 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2843 // f(**kw)
2844 VM_ASSERT(calling->kw_splat == 1);
2845 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2846
2847 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2848 cfp->sp--;
2849 calling->argc--;
2850 calling->kw_splat = 0;
2851 }
2852 else {
2853 cfp->sp[-1] = kwh;
2854 }
2855 }
2856 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2857 // f(k1:1, k2:2)
2858 VM_ASSERT(calling->kw_splat == 0);
2859
2860 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2861 * by creating a keyword hash.
2862 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2863 */
2864 vm_caller_setup_arg_kw(cfp, calling, ci);
2865 }
2866}
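/* In short, CALLER_SETUP_ARG normalizes the three non-trivial argument
 * shapes before callee setup: f(*a, **kw) splats the array and then
 * appends the keyword hash (or drops it when empty); f(*a) splats and
 * re-detects a trailing hash marked RHASH_PASS_AS_KEYWORDS; and
 * f(**kw) / f(k1: 1) reduce to a single keyword hash on top of the
 * stack. Plain positional calls fall through untouched.
 */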
2867
2868#define USE_OPT_HIST 0
2869
2870#if USE_OPT_HIST
2871#define OPT_HIST_MAX 64
2872static int opt_hist[OPT_HIST_MAX+1];
2873
2874__attribute__((destructor))
2875static void
2876opt_hist_show_results_at_exit(void)
2877{
2878 for (int i=0; i<OPT_HIST_MAX; i++) {
2879 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2880 }
2881}
2882#endif
2883
2884static VALUE
2885vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2886 struct rb_calling_info *calling)
2887{
2888 const struct rb_callcache *cc = calling->cc;
2889 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2890 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2891 const int opt = calling->argc - lead_num;
2892 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2893 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2894 const int param = ISEQ_BODY(iseq)->param.size;
2895 const int local = ISEQ_BODY(iseq)->local_table_size;
2896 const int delta = opt_num - opt;
2897
2898 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2899
2900#if USE_OPT_HIST
2901 if (opt < OPT_HIST_MAX) {
2902 opt_hist[opt]++;
2903 }
2904 else {
2905 opt_hist[OPT_HIST_MAX]++;
2906 }
2907#endif
2908
2909 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2910}
2911
2912static VALUE
2913vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2914 struct rb_calling_info *calling)
2915{
2916 const struct rb_callcache *cc = calling->cc;
2917 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2918 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2919 const int opt = calling->argc - lead_num;
2920 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2921
2922 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2923
2924#if USE_OPT_HIST
2925 if (opt < OPT_HIST_MAX) {
2926 opt_hist[opt]++;
2927 }
2928 else {
2929 opt_hist[OPT_HIST_MAX]++;
2930 }
2931#endif
2932
2933 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2934}
2935
2936static void
2937args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2938 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2939 VALUE *const locals);
2940
2941static VALUE
2942vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2943 struct rb_calling_info *calling)
2944{
2945 const struct rb_callcache *cc = calling->cc;
2946 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2947 int param_size = ISEQ_BODY(iseq)->param.size;
2948 int local_size = ISEQ_BODY(iseq)->local_table_size;
2949
2950 // Setting up local size and param size
2951 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2952
2953 local_size = local_size + vm_ci_argc(calling->cd->ci);
2954 param_size = param_size + vm_ci_argc(calling->cd->ci);
2955
2956 cfp->sp[0] = (VALUE)calling->cd->ci;
2957
2958 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2959}
2960
2961static VALUE
2962vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2963 struct rb_calling_info *calling)
2964{
2965 const struct rb_callinfo *ci = calling->cd->ci;
2966 const struct rb_callcache *cc = calling->cc;
2967
2968 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2969 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2970
2971 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2972 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2973 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2974 const int ci_kw_len = kw_arg->keyword_len;
2975 const VALUE * const ci_keywords = kw_arg->keywords;
2976 VALUE *argv = cfp->sp - calling->argc;
2977 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2978 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2979 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2980 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2981 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2982
2983 int param = ISEQ_BODY(iseq)->param.size;
2984 int local = ISEQ_BODY(iseq)->local_table_size;
2985 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2986}
2987
2988static VALUE
2989vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2990 struct rb_calling_info *calling)
2991{
2992 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2993 const struct rb_callcache *cc = calling->cc;
2994
2995 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2996 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2997
2998 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2999 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3000 VALUE * const argv = cfp->sp - calling->argc;
3001 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3002
3003 int i;
3004 for (i=0; i<kw_param->num; i++) {
3005 klocals[i] = kw_param->default_values[i];
3006 }
3007 klocals[i] = INT2FIX(0); // kw specify flag
3008 // NOTE:
3009 // Nobody checks this value, but it should be cleared because it can
3010 // point at an invalid VALUE (T_NONE objects, raw pointers and so on).
3011
3012 int param = ISEQ_BODY(iseq)->param.size;
3013 int local = ISEQ_BODY(iseq)->local_table_size;
3014 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3015}
3016
3017static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3018
3019static VALUE
3020vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3021 struct rb_calling_info *calling)
3022{
3023 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3024 cfp->sp -= (calling->argc + 1);
3025 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3026 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3027}
3028
3029VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3030
3031static void
3032warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3033{
3034 rb_vm_t *vm = GET_VM();
3035 st_table *dup_check_table = vm->unused_block_warning_table;
3036 st_data_t key;
3037 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3038
3039 union {
3040 VALUE v;
3041 unsigned char b[SIZEOF_VALUE];
3042 } k1 = {
3043 .v = (VALUE)pc,
3044 }, k2 = {
3045 .v = (VALUE)cme->def,
3046 };
3047
3048 // relaxed check
3049 if (!strict_unused_block) {
3050 key = (st_data_t)cme->def->original_id;
3051
3052 if (st_lookup(dup_check_table, key, NULL)) {
3053 return;
3054 }
3055 }
3056
3057 // strict check
3058 // make unique key from pc and me->def pointer
3059 key = 0;
3060 for (int i=0; i<SIZEOF_VALUE; i++) {
3061 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3062 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3063 }
3064
3065 if (0) {
3066 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3067 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3068 fprintf(stderr, "key:%p\n", (void *)key);
3069 }
3070
3071 // duplication check
3072 if (st_insert(dup_check_table, key, 1)) {
3073 // already shown
3074 }
3075 else if (RTEST(ruby_verbose) || strict_unused_block) {
3076 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3077 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3078
3079 if (!NIL_P(m_loc)) {
3080 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3081 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3082 }
3083 else {
3084 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3085 }
3086 }
3087}
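/* In strict mode the dedup key above is built by XOR-ing each byte of
 * the call-site pc with the byte-reversed bytes of the method definition
 * pointer, folded into a single st_data_t. Mixing both pointers keeps
 * the warning per (call site, method) pair, whereas the relaxed mode
 * deduplicates on the method's original_id alone.
 */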
3088
3089static inline int
3090vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3091 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3092{
3093 const struct rb_callinfo *ci = calling->cd->ci;
3094 const struct rb_callcache *cc = calling->cc;
3095
3096 VM_ASSERT((vm_ci_argc(ci), 1));
3097 VM_ASSERT(vm_cc_cme(cc) != NULL);
3098
3099 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3100 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3101 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3102 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3103 }
3104
3105 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3106 if (LIKELY(rb_simple_iseq_p(iseq))) {
3107 rb_control_frame_t *cfp = ec->cfp;
3108 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3109 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3110
3111 if (calling->argc != lead_num) {
3112 argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3113 }
3114
3115 //VM_ASSERT(ci == calling->cd->ci);
3116 VM_ASSERT(cc == calling->cc);
3117
3118 if (vm_call_iseq_optimizable_p(ci, cc)) {
3119 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3120 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3121 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3122 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3123 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3124 }
3125 else {
3126 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3127 }
3128 }
3129 return 0;
3130 }
3131 else if (rb_iseq_only_optparam_p(iseq)) {
3132 rb_control_frame_t *cfp = ec->cfp;
3133
3134 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3135 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3136
3137 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3138 const int argc = calling->argc;
3139 const int opt = argc - lead_num;
3140
3141 if (opt < 0 || opt > opt_num) {
3142 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3143 }
3144
3145 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3146 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3147 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3148 vm_call_cacheable(ci, cc));
3149 }
3150 else {
3151 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3152 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3153 vm_call_cacheable(ci, cc));
3154 }
3155
3156 /* initialize opt vars for self-references */
3157 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3158 for (int i=argc; i<lead_num + opt_num; i++) {
3159 argv[i] = Qnil;
3160 }
3161 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3162 }
3163 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3164 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3165 const int argc = calling->argc;
3166 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3167
3168 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3169 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3170
3171 if (argc - kw_arg->keyword_len == lead_num) {
3172 const int ci_kw_len = kw_arg->keyword_len;
3173 const VALUE * const ci_keywords = kw_arg->keywords;
3174 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3175 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3176
3177 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3178 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3179
3180 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3181 vm_call_cacheable(ci, cc));
3182
3183 return 0;
3184 }
3185 }
3186 else if (argc == lead_num) {
3187 /* no kwarg */
3188 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3189 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3190
3191 if (klocals[kw_param->num] == INT2FIX(0)) {
3192 /* copy from default_values */
3193 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3194 vm_call_cacheable(ci, cc));
3195 }
3196
3197 return 0;
3198 }
3199 }
3200 }
3201
3202 // Called iseq is using ... param
3203 // def foo(...) # <- iseq for foo will have "forwardable"
3204 //
3205 // We want to set the `...` local to the caller's CI
3206 // foo(1, 2) # <- the ci for this should end up as `...`
3207 //
3208 // So hopefully the stack looks like:
3209 //
3210 // => 1
3211 // => 2
3212 // => *
3213 // => **
3214 // => &
3215 // => ... # <- points at `foo`s CI
3216 // => cref_or_me
3217 // => specval
3218 // => type
3219 //
3220 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3221 bool can_fastpath = true;
3222
3223 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3224 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3225 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3226 ci = vm_ci_new_runtime(
3227 vm_ci_mid(ci),
3228 vm_ci_flag(ci),
3229 vm_ci_argc(ci),
3230 vm_ci_kwarg(ci));
3231 } else {
3232 ci = forward_cd->caller_ci;
3233 }
3234 can_fastpath = false;
3235 }
3236 // C functions calling iseqs will stack-allocate a CI,
3237 // so we need to convert it to a heap-allocated one.
3238 if (!vm_ci_markable(ci)) {
3239 ci = vm_ci_new_runtime(
3240 vm_ci_mid(ci),
3241 vm_ci_flag(ci),
3242 vm_ci_argc(ci),
3243 vm_ci_kwarg(ci));
3244 can_fastpath = false;
3245 }
3246 argv[param_size - 1] = (VALUE)ci;
3247 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3248 return 0;
3249 }
3250
3251 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3252}
3253
3254static void
3255vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3256{
3257 // This case is when the caller is using a ... parameter.
3258 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3259 // In this case the caller's caller's CI will be on the stack.
3260 //
3261 // For example:
3262 //
3263 // def bar(a, b); a + b; end
3264 // def foo(...); bar(...); end
3265 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3266 //
3267 // Stack layout will be:
3268 //
3269 // > 1
3270 // > 2
3271 // > CI for foo(1, 2)
3272 // > cref_or_me
3273 // > specval
3274 // > type
3275 // > receiver
3276 // > CI for foo(1, 2), via `getlocal ...`
3277 // > ( SP points here )
3278 const VALUE * lep = VM_CF_LEP(cfp);
3279
3280 const rb_iseq_t *iseq;
3281
3282 // If we're in an escaped environment (lambda for example), get the iseq
3283 // from the captured env.
3284 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3285 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3286 iseq = env->iseq;
3287 }
3288 else { // Otherwise use the lep to find the caller
3289 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3290 }
3291
3292 // Our local storage is below the args we need to copy
3293 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3294
3295 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3296 VALUE * to = cfp->sp - 1; // clobber the CI
3297
3298 if (RTEST(splat)) {
3299 to -= 1; // clobber the splat array
3300 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3301 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3302 to += RARRAY_LEN(splat);
3303 }
3304
3305 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3306 MEMCPY(to, from, VALUE, argc);
3307 cfp->sp = to + argc;
3308
3309 // Stack layout should now be:
3310 //
3311 // > 1
3312 // > 2
3313 // > CI for foo(1, 2)
3314 // > cref_or_me
3315 // > specval
3316 // > type
3317 // > receiver
3318 // > 1
3319 // > 2
3320 // > ( SP points here )
3321}
3322
3323static VALUE
3324vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3325{
3326 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3327
3328 const struct rb_callcache *cc = calling->cc;
3329 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3330 int param_size = ISEQ_BODY(iseq)->param.size;
3331 int local_size = ISEQ_BODY(iseq)->local_table_size;
3332
3333 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3334
3335 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3336 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3337}
3338
3339static VALUE
3340vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3341{
3342 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3343
3344 const struct rb_callcache *cc = calling->cc;
3345 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3346 int param_size = ISEQ_BODY(iseq)->param.size;
3347 int local_size = ISEQ_BODY(iseq)->local_table_size;
3348
3349 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3350
3351 // Setting up local size and param size
3352 local_size = local_size + vm_ci_argc(calling->cd->ci);
3353 param_size = param_size + vm_ci_argc(calling->cd->ci);
3354
3355 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3356 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3357}
3358
3359static inline VALUE
3360vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3361 int opt_pc, int param_size, int local_size)
3362{
3363 const struct rb_callinfo *ci = calling->cd->ci;
3364 const struct rb_callcache *cc = calling->cc;
3365
3366 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3367 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3368 }
3369 else {
3370 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3371 }
3372}
3373
3374static inline VALUE
3375vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3376 int opt_pc, int param_size, int local_size)
3377{
3378 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3379 VALUE *argv = cfp->sp - calling->argc;
3380 VALUE *sp = argv + param_size;
3381 cfp->sp = argv - 1 /* recv */;
3382
3383 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3384 calling->block_handler, (VALUE)me,
3385 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3386 local_size - param_size,
3387 ISEQ_BODY(iseq)->stack_max);
3388 return Qundef;
3389}
3390
3391static inline VALUE
3392vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3393{
3394 const struct rb_callcache *cc = calling->cc;
3395 unsigned int i;
3396 VALUE *argv = cfp->sp - calling->argc;
3397 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3398 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3399 VALUE *src_argv = argv;
3400 VALUE *sp_orig, *sp;
3401 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3402
3403 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3404 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3405 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3406 dst_captured->code.val = src_captured->code.val;
3407 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3408 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3409 }
3410 else {
3411 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3412 }
3413 }
3414
3415 vm_pop_frame(ec, cfp, cfp->ep);
3416 cfp = ec->cfp;
3417
3418 sp_orig = sp = cfp->sp;
3419
3420 /* push self */
3421 sp[0] = calling->recv;
3422 sp++;
3423
3424 /* copy arguments */
3425 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3426 *sp++ = src_argv[i];
3427 }
3428
3429 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3430 calling->recv, calling->block_handler, (VALUE)me,
3431 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3432 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3433 ISEQ_BODY(iseq)->stack_max);
3434
3435 cfp->sp = sp_orig;
3436
3437 return Qundef;
3438}
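/* The tailcall setup above replaces the caller's frame instead of
 * stacking a new one: the current frame is popped, the receiver and
 * arguments are re-pushed at the old stack position, and the callee
 * frame is pushed there, inheriting VM_FRAME_FLAG_FINISH when the
 * replaced frame had it. This is what lets tailcall-optimized recursion
 * run in constant VM stack space.
 */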
3439
3440static void
3441ractor_unsafe_check(void)
3442{
3443 if (!rb_ractor_main_p()) {
3444 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3445 }
3446}
3447
3448static VALUE
3449call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3450{
3451 ractor_unsafe_check();
3452 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3453 return (*f)(recv, rb_ary_new4(argc, argv));
3454}
3455
3456static VALUE
3457call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3458{
3459 ractor_unsafe_check();
3460 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3461 return (*f)(argc, argv, recv);
3462}
3463
3464static VALUE
3465call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3466{
3467 ractor_unsafe_check();
3468 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3469 return (*f)(recv);
3470}
3471
3472static VALUE
3473call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3474{
3475 ractor_unsafe_check();
3476 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3477 return (*f)(recv, argv[0]);
3478}
3479
3480static VALUE
3481call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3482{
3483 ractor_unsafe_check();
3484 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3485 return (*f)(recv, argv[0], argv[1]);
3486}
3487
3488static VALUE
3489call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3490{
3491 ractor_unsafe_check();
3492 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3493 return (*f)(recv, argv[0], argv[1], argv[2]);
3494}
3495
3496static VALUE
3497call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3498{
3499 ractor_unsafe_check();
3500 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3501 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3502}
3503
3504static VALUE
3505call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3506{
3507 ractor_unsafe_check();
3508 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3509 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3510}
3511
3512static VALUE
3513call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3514{
3515 ractor_unsafe_check();
3516 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3517 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3518}
3519
3520static VALUE
3521call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3522{
3523 ractor_unsafe_check();
3524 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3525 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3526}
3527
3528static VALUE
3529call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3530{
3531 ractor_unsafe_check();
3532 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3533 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3534}
3535
3536static VALUE
3537call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3538{
3539 ractor_unsafe_check();
3540 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3541 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3542}
3543
3544static VALUE
3545call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3546{
3547 ractor_unsafe_check();
3548 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3549 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3550}
3551
3552static VALUE
3553call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3554{
3555 ractor_unsafe_check();
3556 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3557 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3558}
3559
3560static VALUE
3561call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3562{
3563 ractor_unsafe_check();
3564 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3565 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3566}
3567
3568static VALUE
3569call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3570{
3571 ractor_unsafe_check();
3572 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3573 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3574}
3575
3576static VALUE
3577call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3578{
3579 ractor_unsafe_check();
3580 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3581 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3582}
3583
3584static VALUE
3585call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3586{
3587 ractor_unsafe_check();
3588 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3589 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3590}
3591
3592static VALUE
3593ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3594{
3595 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3596 return (*f)(recv, rb_ary_new4(argc, argv));
3597}
3598
3599static VALUE
3600ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3601{
3602 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3603 return (*f)(argc, argv, recv);
3604}
3605
3606static VALUE
3607ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3608{
3609 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3610 return (*f)(recv);
3611}
3612
3613static VALUE
3614ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3615{
3616 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3617 return (*f)(recv, argv[0]);
3618}
3619
3620static VALUE
3621ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3622{
3623 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3624 return (*f)(recv, argv[0], argv[1]);
3625}
3626
3627static VALUE
3628ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3629{
3630 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3631 return (*f)(recv, argv[0], argv[1], argv[2]);
3632}
3633
3634static VALUE
3635ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3636{
3637 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3638 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3639}
3640
3641static VALUE
3642ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643{
3644 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3645 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3646}
3647
3648static VALUE
3649ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3650{
3651 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3652 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3653}
3654
3655static VALUE
3656ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657{
3658 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3659 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3660}
3661
3662static VALUE
3663ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3664{
3665 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3666 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3667}
3668
3669static VALUE
3670ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3671{
3672 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3673 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3674}
3675
3676static VALUE
3677ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3678{
3679 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3680 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3681}
3682
3683static VALUE
3684ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3685{
3686 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3687 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3688}
3689
3690static VALUE
3691ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3692{
3693 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3694 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3695}
3696
3697static VALUE
3698ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3699{
3700 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3701 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3702}
3703
3704static VALUE
3705ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3706{
3707 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3708 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3709}
3710
3711static VALUE
3712ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3713{
3714 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3715 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3716}
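/* These invokers form a dispatch table indexed by the declared cfunc
 * arity: -2 receives (recv, args_ary), -1 receives (argc, argv, recv),
 * and 0..15 receive the receiver plus that many spread arguments. The
 * ractor_safe_ variants are identical except that they skip
 * ractor_unsafe_check(), so methods registered as Ractor-safe may be
 * called from any ractor.
 */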
3717
3718static inline int
3719vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3720{
3721 const int ov_flags = RAISED_STACKOVERFLOW;
3722 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3723 if (rb_ec_raised_p(ec, ov_flags)) {
3724 rb_ec_raised_reset(ec, ov_flags);
3725 return TRUE;
3726 }
3727 return FALSE;
3728}
3729
3730#define CHECK_CFP_CONSISTENCY(func) \
3731 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3732 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3733
3734static inline
3735const rb_method_cfunc_t *
3736vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3737{
3738#if VM_DEBUG_VERIFY_METHOD_CACHE
3739 switch (me->def->type) {
3740 case VM_METHOD_TYPE_CFUNC:
3741 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3742 break;
3743# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3744 METHOD_BUG(ISEQ);
3745 METHOD_BUG(ATTRSET);
3746 METHOD_BUG(IVAR);
3747 METHOD_BUG(BMETHOD);
3748 METHOD_BUG(ZSUPER);
3749 METHOD_BUG(UNDEF);
3750 METHOD_BUG(OPTIMIZED);
3751 METHOD_BUG(MISSING);
3752 METHOD_BUG(REFINED);
3753 METHOD_BUG(ALIAS);
3754# undef METHOD_BUG
3755 default:
3756 rb_bug("wrong method type: %d", me->def->type);
3757 }
3758#endif
3759 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3760}
3761
3762static VALUE
3763vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3764 int argc, VALUE *argv, VALUE *stack_bottom)
3765{
3766 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3767 const struct rb_callinfo *ci = calling->cd->ci;
3768 const struct rb_callcache *cc = calling->cc;
3769 VALUE val;
3770 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3771 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3772
3773 VALUE recv = calling->recv;
3774 VALUE block_handler = calling->block_handler;
3775 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3776
3777 if (UNLIKELY(calling->kw_splat)) {
3778 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3779 }
3780
3781 VM_ASSERT(reg_cfp == ec->cfp);
3782
3783 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3784 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3785
3786 vm_push_frame(ec, NULL, frame_type, recv,
3787 block_handler, (VALUE)me,
3788 0, ec->cfp->sp, 0, 0);
3789
3790 int len = cfunc->argc;
3791 if (len >= 0) rb_check_arity(argc, len, len);
3792
3793 reg_cfp->sp = stack_bottom;
3794 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3795
3796 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3797
3798 rb_vm_pop_frame(ec);
3799
3800 VM_ASSERT(ec->cfp->sp == stack_bottom);
3801
3802 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3803 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3804
3805 return val;
3806}
3807
3808// Push a C method frame for a given cme. This is called when JIT code skipped
3809// pushing a frame but the C method reached a point where a frame is needed.
3810void
3811rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3812{
3813 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3814 rb_execution_context_t *ec = GET_EC();
3815 VALUE *sp = ec->cfp->sp;
3816 VALUE recv = *(sp - recv_idx - 1);
3817 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3818 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3819#if VM_CHECK_MODE > 0
3820 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3821 *(GET_EC()->cfp->sp) = Qfalse;
3822#endif
3823 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3824}
3825
3826// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3827bool
3828rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3829{
3830 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3831}
3832
3833static VALUE
3834vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3835{
3836 int argc = calling->argc;
3837 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3838 VALUE *argv = &stack_bottom[1];
3839
3840 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3841}
3842
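// Generic cfunc path that materializes splat/keyword arguments. When the
// arguments were spilled to a hidden heap array, its contents are passed
// directly; otherwise vm_call_cfunc_with_frame is installed as the fastpath
// for call shapes that need no splat/keyword handling.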
3843static VALUE
3844vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3845{
3846 const struct rb_callinfo *ci = calling->cd->ci;
3847 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3848
3849 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3850 VALUE argv_ary;
3851 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3852 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3853 int argc = RARRAY_LENINT(argv_ary);
3854 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3855 VALUE *stack_bottom = reg_cfp->sp - 2;
3856
3857 VM_ASSERT(calling->argc == 1);
3858 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3859 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3860
3861 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3862 }
3863 else {
3864 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3865
3866 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3867 }
3868}
3869
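// Copy the elements of a splatted array onto the VM stack so the cfunc can be
// called without heap-allocated arguments; falls back to vm_call_cfunc_other
// when the array exceeds VM_ARGC_STACK_MAX.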
3870static inline VALUE
3871vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3872{
3873 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3874 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3875
3876 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3877 return vm_call_cfunc_other(ec, reg_cfp, calling);
3878 }
3879
3880 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3881 calling->kw_splat = 0;
3882 int i;
3883 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3884 VALUE *sp = stack_bottom;
3885 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3886 for (i = 0; i < argc; i++) {
3887 *++sp = argv[i];
3888 }
3889 reg_cfp->sp = sp+1;
3890
3891 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3892}
3893
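// Specialized fastpath for `f(*a)`: a trailing hash flagged to be passed as
// keywords forces the generic path unless it is empty, in which case it is
// simply dropped.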
3894static inline VALUE
3895vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3896{
3897 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3898 VALUE argv_ary = reg_cfp->sp[-1];
3899 int argc = RARRAY_LENINT(argv_ary);
3900 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3901 VALUE last_hash;
3902 int argc_offset = 0;
3903
3904 if (UNLIKELY(argc > 0 &&
3905 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3906 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3907 if (!RHASH_EMPTY_P(last_hash)) {
3908 return vm_call_cfunc_other(ec, reg_cfp, calling);
3909 }
3910 argc_offset++;
3911 }
3912 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3913}
3914
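// Specialized fastpath for `f(*a, **kw)` where `kw` turns out to be nil or an
// empty hash, so the call degenerates to a plain splat.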
3915static inline VALUE
3916vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3917{
3918 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3919 VALUE keyword_hash = reg_cfp->sp[-1];
3920
3921 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3922 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3923 }
3924
3925 return vm_call_cfunc_other(ec, reg_cfp, calling);
3926}
3927
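// Entry point for cfunc methods: pick the most specialized handler for the
// call shape and install it as the call-cache fastpath.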
3928static VALUE
3929vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3930{
3931 const struct rb_callinfo *ci = calling->cd->ci;
3932 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3933
3934 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3935 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3936 // f(*a)
3937 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3938 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3939 }
3940 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3941 // f(*a, **kw)
3942 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3943 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3944 }
3945 }
3946
3947 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3948 return vm_call_cfunc_other(ec, reg_cfp, calling);
3949}
3950
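// attr_reader fastpath: pop the receiver and read the ivar through the shape
// index cached in the call cache.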
3951static VALUE
3952vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3953{
3954 const struct rb_callcache *cc = calling->cc;
3955 RB_DEBUG_COUNTER_INC(ccf_ivar);
3956 cfp->sp -= 1;
3957 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3958 return ivar;
3959}
3960
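// attr_writer fastpath: pop the value and receiver, then set the ivar using
// the shape/index cached in the call cache, falling back to the generic
// setivar paths for non-T_OBJECT receivers or on a cache miss.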
3961static VALUE
3962vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3963{
3964 RB_DEBUG_COUNTER_INC(ccf_attrset);
3965 VALUE val = *(cfp->sp - 1);
3966 cfp->sp -= 2;
3967 attr_index_t index = vm_cc_attr_index(cc);
3968 shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3969 ID id = vm_cc_cme(cc)->def->body.attr.id;
3970 rb_check_frozen(obj);
3971 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3972 if (UNDEF_P(res)) {
3973 switch (BUILTIN_TYPE(obj)) {
3974 case T_OBJECT:
3975 case T_CLASS:
3976 case T_MODULE:
3977 break;
3978 default:
3979 {
3980 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3981 if (!UNDEF_P(res)) {
3982 return res;
3983 }
3984 }
3985 }
3986 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3987 }
3988 return res;
3989}
3990
3991static VALUE
3992vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3993{
3994 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3995}
3996
3997static inline VALUE
3998vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3999{
4000 rb_proc_t *proc;
4001 VALUE val;
4002 const struct rb_callcache *cc = calling->cc;
4003 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4004 VALUE procv = cme->def->body.bmethod.proc;
4005
4006 if (!RB_OBJ_SHAREABLE_P(procv) &&
4007 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4008 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4009 }
4010
4011 /* control block frame */
4012 GetProcPtr(procv, proc);
4013 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4014
4015 return val;
4016}
4017
4018static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4019
4020static VALUE
4021vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4022{
4023 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4024
4025 const struct rb_callcache *cc = calling->cc;
4026 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4027 VALUE procv = cme->def->body.bmethod.proc;
4028
4029 if (!RB_OBJ_SHAREABLE_P(procv) &&
4030 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4031 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4032 }
4033
4034 rb_proc_t *proc;
4035 GetProcPtr(procv, proc);
4036 const struct rb_block *block = &proc->block;
4037
4038 while (vm_block_type(block) == block_type_proc) {
4039 block = vm_proc_block(block->as.proc);
4040 }
4041 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4042
4043 const struct rb_captured_block *captured = &block->as.captured;
4044 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4045 VALUE * const argv = cfp->sp - calling->argc;
4046 const int arg_size = ISEQ_BODY(iseq)->param.size;
4047
4048 int opt_pc;
4049 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4050 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4051 }
4052 else {
4053 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4054 }
4055
4056 cfp->sp = argv - 1; // -1 for the receiver
4057
4058 vm_push_frame(ec, iseq,
4059 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4060 calling->recv,
4061 VM_GUARDED_PREV_EP(captured->ep),
4062 (VALUE)cme,
4063 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4064 argv + arg_size,
4065 ISEQ_BODY(iseq)->local_table_size - arg_size,
4066 ISEQ_BODY(iseq)->stack_max);
4067
4068 return Qundef;
4069}
4070
4071static VALUE
4072vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4073{
4074 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4075
4076 VALUE *argv;
4077 int argc;
4078 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4079 if (UNLIKELY(calling->heap_argv)) {
4080 argv = RARRAY_PTR(calling->heap_argv);
4081 cfp->sp -= 2;
4082 }
4083 else {
4084 argc = calling->argc;
4085 argv = ALLOCA_N(VALUE, argc);
4086 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4087 cfp->sp += - argc - 1;
4088 }
4089
4090 return vm_call_bmethod_body(ec, calling, argv);
4091}
4092
4093static VALUE
4094vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4095{
4096 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4097
4098 const struct rb_callcache *cc = calling->cc;
4099 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4100 VALUE procv = cme->def->body.bmethod.proc;
4101 rb_proc_t *proc;
4102 GetProcPtr(procv, proc);
4103 const struct rb_block *block = &proc->block;
4104
4105 while (vm_block_type(block) == block_type_proc) {
4106 block = vm_proc_block(block->as.proc);
4107 }
4108 if (vm_block_type(block) == block_type_iseq) {
4109 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4110 return vm_call_iseq_bmethod(ec, cfp, calling);
4111 }
4112
4113 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4114 return vm_call_noniseq_bmethod(ec, cfp, calling);
4115}
4116
4117VALUE
4118rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4119{
4120 VALUE klass = current_class;
4121
4122 /* for a prepended Module, start the search from the cover (origin) class */
4123 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
4124 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4125 klass = RBASIC_CLASS(klass);
4126 }
4127
4128 while (RTEST(klass)) {
4129 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4130 if (owner == target_owner) {
4131 return klass;
4132 }
4133 klass = RCLASS_SUPER(klass);
4134 }
4135
4136 return current_class; /* maybe module function */
4137}
4138
4139static const rb_callable_method_entry_t *
4140aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4141{
4142 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4143 const rb_callable_method_entry_t *cme;
4144
4145 if (orig_me->defined_class == 0) {
4146 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4147 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4148 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4149
4150 if (me->def->reference_count == 1) {
4151 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4152 }
4153 else {
4154 rb_method_definition_t *def =
4155 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4156 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4157 }
4158 }
4159 else {
4160 cme = (const rb_callable_method_entry_t *)orig_me;
4161 }
4162
4163 VM_ASSERT(callable_method_entry_p(cme));
4164 return cme;
4165}
4166
4167const rb_callable_method_entry_t *
4168rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4169{
4170 return aliased_callable_method_entry(me);
4171}
4172
4173static VALUE
4174vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4175{
4176 calling->cc = &VM_CC_ON_STACK(Qundef,
4177 vm_call_general,
4178 {{0}},
4179 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4180
4181 return vm_call_method_each_type(ec, cfp, calling);
4182}
4183
4184static enum method_missing_reason
4185ci_missing_reason(const struct rb_callinfo *ci)
4186{
4187 enum method_missing_reason stat = MISSING_NOENTRY;
4188 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4189 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4190 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4191 return stat;
4192}
4193
4194static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4195
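// Core of Symbol/String-based dispatch (e.g. send, Symbol#to_proc): resolve
// `symbol` to a method id, rewriting the call to method_missing when the id
// does not exist. When only the basic method_missing definition exists,
// NoMethodError is raised directly so the name is never interned ([Feature #5112]).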
4196static VALUE
4197vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4198 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4199{
4200 ASSUME(calling->argc >= 0);
4201
4202 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4203 int argc = calling->argc;
4204 VALUE recv = calling->recv;
4205 VALUE klass = CLASS_OF(recv);
4206 ID mid = rb_check_id(&symbol);
4207 flags |= VM_CALL_OPT_SEND;
4208
4209 if (UNLIKELY(! mid)) {
4210 mid = idMethodMissing;
4211 missing_reason = ci_missing_reason(ci);
4212 ec->method_missing_reason = missing_reason;
4213
4214 VALUE argv_ary;
4215 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4216 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4217 rb_ary_unshift(argv_ary, symbol);
4218
4219 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4220 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4221 VALUE exc = rb_make_no_method_exception(
4222 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4223
4224 rb_exc_raise(exc);
4225 }
4226 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4227 }
4228 else {
4229 /* E.g. when argc == 2
4230 *
4231 * | | | | TOPN
4232 * | | +------+
4233 * | | +---> | arg1 | 0
4234 * +------+ | +------+
4235 * | arg1 | -+ +-> | arg0 | 1
4236 * +------+ | +------+
4237 * | arg0 | ---+ | sym | 2
4238 * +------+ +------+
4239 * | recv | | recv | 3
4240 * --+------+--------+------+------
4241 */
4242 int i = argc;
4243 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4244 INC_SP(1);
4245 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4246 argc = ++calling->argc;
4247
4248 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4249 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4250 TOPN(i) = symbol;
4251 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4252 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4253 VALUE exc = rb_make_no_method_exception(
4254 rb_eNoMethodError, 0, recv, argc, argv, priv);
4255
4256 rb_exc_raise(exc);
4257 }
4258 else {
4259 TOPN(i) = rb_str_intern(symbol);
4260 }
4261 }
4262 }
4263
4264 struct rb_forwarding_call_data new_fcd = {
4265 .cd = {
4266 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4267 .cc = NULL,
4268 },
4269 .caller_ci = NULL,
4270 };
4271
4272 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4273 calling->cd = &new_fcd.cd;
4274 }
4275 else {
4276 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4277 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4278 new_fcd.caller_ci = caller_ci;
4279 calling->cd = (struct rb_call_data *)&new_fcd;
4280 }
4281 calling->cc = &VM_CC_ON_STACK(klass,
4282 vm_call_general,
4283 { .method_missing_reason = missing_reason },
4284 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4285
4286 if (flags & VM_CALL_FCALL) {
4287 return vm_call_method(ec, reg_cfp, calling);
4288 }
4289
4290 const struct rb_callcache *cc = calling->cc;
4291 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4292
4293 if (vm_cc_cme(cc) != NULL) {
4294 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4295 case METHOD_VISI_PUBLIC: /* likely */
4296 return vm_call_method_each_type(ec, reg_cfp, calling);
4297 case METHOD_VISI_PRIVATE:
4298 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4299 break;
4300 case METHOD_VISI_PROTECTED:
4301 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4302 break;
4303 default:
4304 VM_UNREACHABLE(vm_call_method);
4305 }
4306 return vm_call_method_missing(ec, reg_cfp, calling);
4307 }
4308
4309 return vm_call_method_nome(ec, reg_cfp, calling);
4310}
4311
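// `send` with arguments on the stack: the method name sits just above the
// receiver, so shift the remaining arguments down over it and dispatch via
// vm_call_symbol.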
4312static VALUE
4313vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4314{
4315 const struct rb_callinfo *ci = calling->cd->ci;
4316 int i;
4317 VALUE sym;
4318
4319 i = calling->argc - 1;
4320
4321 if (calling->argc == 0) {
4322 rb_raise(rb_eArgError, "no method name given");
4323 }
4324
4325 sym = TOPN(i);
4326 /* E.g. when i == 2
4327 *
4328 * | | | | TOPN
4329 * +------+ | |
4330 * | arg1 | ---+ | | 0
4331 * +------+ | +------+
4332 * | arg0 | -+ +-> | arg1 | 1
4333 * +------+ | +------+
4334 * | sym | +---> | arg0 | 2
4335 * +------+ +------+
4336 * | recv | | recv | 3
4337 * --+------+--------+------+------
4338 */
4339 /* shift arguments */
4340 if (i > 0) {
4341 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4342 }
4343 calling->argc -= 1;
4344 DEC_SP(1);
4345
4346 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4347}
4348
4349static VALUE
4350vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4351{
4352 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4353 const struct rb_callinfo *ci = calling->cd->ci;
4354 int flags = VM_CALL_FCALL;
4355 VALUE sym;
4356
4357 VALUE argv_ary;
4358 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4359 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4360 sym = rb_ary_shift(argv_ary);
4361 flags |= VM_CALL_ARGS_SPLAT;
4362 if (calling->kw_splat) {
4363 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4364 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4365 calling->kw_splat = 0;
4366 }
4367 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4368 }
4369
4370 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4371 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4372}
4373
4374static VALUE
4375vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4376{
4377 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4378 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4379}
4380
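// Optimized `send`: the complex path is needed when forwarding, splat,
// kw-splat or keyword handling may apply; otherwise the simple path just
// re-dispatches with VM_CALL_FCALL added.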
4381static VALUE
4382vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4383{
4384 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4385
4386 const struct rb_callinfo *ci = calling->cd->ci;
4387 int flags = vm_ci_flag(ci);
4388
4389 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4390 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4391 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4392 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4393 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4394 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4395 }
4396
4397 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4398 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4399}
4400
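// Build a method_missing call in place: grow the stack by one slot, shift the
// arguments up, prepend the original method id as a Symbol, and dispatch to
// the receiver's method_missing (looked up without refinements).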
4401static VALUE
4402vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4403 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4404{
4405 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4406
4407 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4408 unsigned int argc, flag;
4409
4410 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4411 argc = ++calling->argc;
4412
4413 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4414 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4415 vm_check_canary(ec, reg_cfp->sp);
4416 if (argc > 1) {
4417 MEMMOVE(argv+1, argv, VALUE, argc-1);
4418 }
4419 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4420 INC_SP(1);
4421
4422 ec->method_missing_reason = reason;
4423
4424 struct rb_forwarding_call_data new_fcd = {
4425 .cd = {
4426 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4427 .cc = NULL,
4428 },
4429 .caller_ci = NULL,
4430 };
4431
4432 if (!(flag & VM_CALL_FORWARDING)) {
4433 calling->cd = &new_fcd.cd;
4434 }
4435 else {
4436 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4437 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4438 new_fcd.caller_ci = caller_ci;
4439 calling->cd = (struct rb_call_data *)&new_fcd;
4440 }
4441
4442 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4443 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4444 return vm_call_method(ec, reg_cfp, calling);
4445}
4446
4447static VALUE
4448vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4449{
4450 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4451}
4452
4453static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
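// ZSUPER definitions re-dispatch the same method id starting from the
// superclass of the defining class, unwrapping refined entries to their
// original method.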
4454static VALUE
4455vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4456{
4457 klass = RCLASS_SUPER(klass);
4458
4459 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4460 if (cme == NULL) {
4461 return vm_call_method_nome(ec, cfp, calling);
4462 }
4463 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4464 cme->def->body.refined.orig_me) {
4465 cme = refined_method_callable_without_refinement(cme);
4466 }
4467
4468 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4469
4470 return vm_call_method_each_type(ec, cfp, calling);
4471}
4472
4473static inline VALUE
4474find_refinement(VALUE refinements, VALUE klass)
4475{
4476 if (NIL_P(refinements)) {
4477 return Qnil;
4478 }
4479 return rb_hash_lookup(refinements, klass);
4480}
4481
4482PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4483static rb_control_frame_t *
4484current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4485{
4486 rb_control_frame_t *top_cfp = cfp;
4487
4488 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4489 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4490
4491 do {
4492 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4493 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4494 /* TODO: orphan block */
4495 return top_cfp;
4496 }
4497 } while (cfp->iseq != local_iseq);
4498 }
4499 return cfp;
4500}
4501
4502static const rb_callable_method_entry_t *
4503refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4504{
4505 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4506 const rb_callable_method_entry_t *cme;
4507
4508 if (orig_me->defined_class == 0) {
4509 cme = NULL;
4510 rb_notimplement();
4511 }
4512 else {
4513 cme = (const rb_callable_method_entry_t *)orig_me;
4514 }
4515
4516 VM_ASSERT(callable_method_entry_p(cme));
4517
4518 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4519 cme = NULL;
4520 }
4521
4522 return cme;
4523}
4524
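// Walk the cref chain looking for an activated refinement of the method's
// owner. Returns the refined entry when one applies, NULL when a refinement
// module exists but lacks the method, and otherwise falls back to the
// original (non-refined) entry or a superclass search.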
4525static const rb_callable_method_entry_t *
4526search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4527{
4528 ID mid = vm_ci_mid(calling->cd->ci);
4529 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4530 const struct rb_callcache * const cc = calling->cc;
4531 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4532
4533 for (; cref; cref = CREF_NEXT(cref)) {
4534 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4535 if (NIL_P(refinement)) continue;
4536
4537 const rb_callable_method_entry_t *const ref_me =
4538 rb_callable_method_entry(refinement, mid);
4539
4540 if (ref_me) {
4541 if (vm_cc_call(cc) == vm_call_super_method) {
4542 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4543 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4544 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4545 continue;
4546 }
4547 }
4548
4549 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4550 cme->def != ref_me->def) {
4551 cme = ref_me;
4552 }
4553 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4554 return cme;
4555 }
4556 }
4557 else {
4558 return NULL;
4559 }
4560 }
4561
4562 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4563 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4564 }
4565 else {
4566 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4567 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4568 return cme;
4569 }
4570}
4571
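// Call a refined method, caching the resolved entry in cd->cc when the call
// data has a cache slot, or in an on-stack call cache otherwise.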
4572static VALUE
4573vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4574{
4575 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4576
4577 if (ref_cme) {
4578 if (calling->cd->cc) {
4579 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4580 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4581 return vm_call_method(ec, cfp, calling);
4582 }
4583 else {
4584 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4585 calling->cc = ref_cc;
4586 return vm_call_method(ec, cfp, calling);
4587 }
4588 }
4589 else {
4590 return vm_call_method_nome(ec, cfp, calling);
4591 }
4592}
4593
4594static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4595
4596NOINLINE(static VALUE
4597 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4598 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4599
4600static VALUE
4601vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4602 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4603{
4604 int argc = calling->argc;
4605
4606 /* remove self */
4607 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4608 DEC_SP(1);
4609
4610 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4611}
4612
4613static VALUE
4614vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4615{
4616 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4617
4618 const struct rb_callinfo *ci = calling->cd->ci;
4619 VALUE procval = calling->recv;
4620 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4621}
4622
4623static VALUE
4624vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4625{
4626 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4627
4628 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4629 const struct rb_callinfo *ci = calling->cd->ci;
4630
4631 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4632 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4633 }
4634 else {
4635 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4636 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4637 return vm_call_general(ec, reg_cfp, calling);
4638 }
4639}
4640
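// Optimized accessor generated for Struct members: read the member at the
// index cached in the method definition.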
4641static VALUE
4642vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4643{
4644 VALUE recv = calling->recv;
4645
4646 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4647 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4648 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4649
4650 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4651 return internal_RSTRUCT_GET(recv, off);
4652}
4653
4654static VALUE
4655vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4656{
4657 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4658
4659 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4660 reg_cfp->sp -= 1;
4661 return ret;
4662}
4663
4664static VALUE
4665vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4666{
4667 VALUE recv = calling->recv;
4668
4669 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4670 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4671 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4672
4673 rb_check_frozen(recv);
4674
4675 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4676 internal_RSTRUCT_SET(recv, off, val);
4677
4678 return val;
4679}
4680
4681static VALUE
4682vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4683{
4684 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4685
4686 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4687 reg_cfp->sp -= 2;
4688 return ret;
4689}
4690
4691NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4692 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4693
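// Wrap an optimized attribute access so c-call/c-return events are still
// emitted while those hooks are enabled; otherwise run `nohook` (typically a
// fastpath installation) and the access with no tracing overhead.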
4694#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4695 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4696 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4697 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4698 var = func; \
4699 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4700 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4701 } \
4702 else { \
4703 nohook; \
4704 var = func; \
4705 }
4706
4707static VALUE
4708vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4709 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4710{
4711 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4712 case OPTIMIZED_METHOD_TYPE_SEND:
4713 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4714 return vm_call_opt_send(ec, cfp, calling);
4715 case OPTIMIZED_METHOD_TYPE_CALL:
4716 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4717 return vm_call_opt_call(ec, cfp, calling);
4718 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4719 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4720 return vm_call_opt_block_call(ec, cfp, calling);
4721 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4722 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4723 rb_check_arity(calling->argc, 0, 0);
4724
4725 VALUE v;
4726 VM_CALL_METHOD_ATTR(v,
4727 vm_call_opt_struct_aref(ec, cfp, calling),
4728 set_vm_cc_ivar(cc); \
4729 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4730 return v;
4731 }
4732 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4733 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4734 rb_check_arity(calling->argc, 1, 1);
4735
4736 VALUE v;
4737 VM_CALL_METHOD_ATTR(v,
4738 vm_call_opt_struct_aset(ec, cfp, calling),
4739 set_vm_cc_ivar(cc); \
4740 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4741 return v;
4742 }
4743 default:
4744 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4745 }
4746}
4747
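// Dispatch on the method definition type, installing the matching handler as
// the call-cache fastpath where that is safe.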
4748static VALUE
4749vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4750{
4751 const struct rb_callinfo *ci = calling->cd->ci;
4752 const struct rb_callcache *cc = calling->cc;
4753 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4754 VALUE v;
4755
4756 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4757
4758 switch (cme->def->type) {
4759 case VM_METHOD_TYPE_ISEQ:
4760 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4761 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4762 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4763 }
4764 else {
4765 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4766 return vm_call_iseq_setup(ec, cfp, calling);
4767 }
4768
4769 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4770 case VM_METHOD_TYPE_CFUNC:
4771 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4772 return vm_call_cfunc(ec, cfp, calling);
4773
4774 case VM_METHOD_TYPE_ATTRSET:
4775 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4776
4777 rb_check_arity(calling->argc, 1, 1);
4778
4779 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4780
4781 if (vm_cc_markable(cc)) {
4782 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4783 VM_CALL_METHOD_ATTR(v,
4784 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4785 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4786 }
4787 else {
4788 cc = &((struct rb_callcache) {
4789 .flags = T_IMEMO |
4790 (imemo_callcache << FL_USHIFT) |
4791 VM_CALLCACHE_UNMARKABLE |
4792 VM_CALLCACHE_ON_STACK,
4793 .klass = cc->klass,
4794 .cme_ = cc->cme_,
4795 .call_ = cc->call_,
4796 .aux_ = {
4797 .attr = {
4798 .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4799 }
4800 },
4801 });
4802
4803 VM_CALL_METHOD_ATTR(v,
4804 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4805 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4806 }
4807 return v;
4808
4809 case VM_METHOD_TYPE_IVAR:
4810 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4811 rb_check_arity(calling->argc, 0, 0);
4812 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4813 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4814 VM_CALL_METHOD_ATTR(v,
4815 vm_call_ivar(ec, cfp, calling),
4816 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4817 return v;
4818
4819 case VM_METHOD_TYPE_MISSING:
4820 vm_cc_method_missing_reason_set(cc, 0);
4821 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4822 return vm_call_method_missing(ec, cfp, calling);
4823
4824 case VM_METHOD_TYPE_BMETHOD:
4825 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4826 return vm_call_bmethod(ec, cfp, calling);
4827
4828 case VM_METHOD_TYPE_ALIAS:
4829 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4830 return vm_call_alias(ec, cfp, calling);
4831
4832 case VM_METHOD_TYPE_OPTIMIZED:
4833 return vm_call_optimized(ec, cfp, calling, ci, cc);
4834
4835 case VM_METHOD_TYPE_UNDEF:
4836 break;
4837
4838 case VM_METHOD_TYPE_ZSUPER:
4839 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4840
4841 case VM_METHOD_TYPE_REFINED:
4842 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4843 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4844 return vm_call_refined(ec, cfp, calling);
4845 }
4846
4847 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4848}
4849
4850NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4851
4852static VALUE
4853vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4854{
4855 /* method missing */
4856 const struct rb_callinfo *ci = calling->cd->ci;
4857 const int stat = ci_missing_reason(ci);
4858
4859 if (vm_ci_mid(ci) == idMethodMissing) {
4860 if (UNLIKELY(calling->heap_argv)) {
4861 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4862 }
4863 else {
4864 rb_control_frame_t *reg_cfp = cfp;
4865 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4866 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4867 }
4868 }
4869 else {
4870 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4871 }
4872}
4873
4874/* Protected method calls and super invocations need to check that the receiver
4875 * (self for super) inherits the module on which the method is defined.
4876 * In the case of refinements, it should consider the original class not the
4877 * refinement.
4878 */
4879static VALUE
4880vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4881{
4882 VALUE defined_class = me->defined_class;
4883 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4884 return NIL_P(refined_class) ? defined_class : refined_class;
4885}
4886
4887static inline VALUE
4888vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4889{
4890 const struct rb_callinfo *ci = calling->cd->ci;
4891 const struct rb_callcache *cc = calling->cc;
4892
4893 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4894
4895 if (vm_cc_cme(cc) != NULL) {
4896 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4897 case METHOD_VISI_PUBLIC: /* likely */
4898 return vm_call_method_each_type(ec, cfp, calling);
4899
4900 case METHOD_VISI_PRIVATE:
4901 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4902 enum method_missing_reason stat = MISSING_PRIVATE;
4903 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4904
4905 vm_cc_method_missing_reason_set(cc, stat);
4906 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4907 return vm_call_method_missing(ec, cfp, calling);
4908 }
4909 return vm_call_method_each_type(ec, cfp, calling);
4910
4911 case METHOD_VISI_PROTECTED:
4912 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4913 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4914 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4915 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4916 return vm_call_method_missing(ec, cfp, calling);
4917 }
4918 else {
4919 /* caching method info to dummy cc */
4920 VM_ASSERT(vm_cc_cme(cc) != NULL);
4921 struct rb_callcache cc_on_stack = *cc;
4922 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4923 calling->cc = &cc_on_stack;
4924 return vm_call_method_each_type(ec, cfp, calling);
4925 }
4926 }
4927 return vm_call_method_each_type(ec, cfp, calling);
4928
4929 default:
4930 rb_bug("unreachable");
4931 }
4932 }
4933 else {
4934 return vm_call_method_nome(ec, cfp, calling);
4935 }
4936}
4937
4938static VALUE
4939vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4940{
4941 RB_DEBUG_COUNTER_INC(ccf_general);
4942 return vm_call_method(ec, reg_cfp, calling);
4943}
4944
4945void
4946rb_vm_cc_general(const struct rb_callcache *cc)
4947{
4948 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4949 VM_ASSERT(cc != vm_cc_empty());
4950
4951 *(vm_call_handler *)&cc->call_ = vm_call_general;
4952}
4953
4954static VALUE
4955vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4956{
4957 RB_DEBUG_COUNTER_INC(ccf_super_method);
4958
4959 // This check is introduced to keep this function distinct from `vm_call_general`, because some compilers
4960 // (MSVC, in our experience) can merge identical functions so that their addresses become the same.
4961 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
4962 if (ec == NULL) rb_bug("unreachable");
4963
4964 /* this check is required to distinguish this function from other functions. */
4965 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4966 return vm_call_method(ec, reg_cfp, calling);
4967}
4968
4969/* super */
4970
4971static inline VALUE
4972vm_search_normal_superclass(VALUE klass)
4973{
4974 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4975 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4976 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4977 klass = RBASIC(klass)->klass;
4978 }
4979 klass = RCLASS_ORIGIN(klass);
4980 return RCLASS_SUPER(klass);
4981}
4982
4983NORETURN(static void vm_super_outside(void));
4984
4985static void
4986vm_super_outside(void)
4987{
4988 rb_raise(rb_eNoMethodError, "super called outside of method");
4989}
4990
4991static const struct rb_callcache *
4992empty_cc_for_super(void)
4993{
4994 return &vm_empty_cc_for_super;
4995}
4996
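// Resolve the target of `super`: validate the calling context, rewrite the
// call info to the original method id, then search for the method starting at
// the superclass of the defining class and cache the result in cd->cc.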
4997static const struct rb_callcache *
4998vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
4999{
5000 VALUE current_defined_class;
5001 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5002
5003 if (!me) {
5004 vm_super_outside();
5005 }
5006
5007 current_defined_class = vm_defined_class_for_protected_call(me);
5008
5009 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5010 reg_cfp->iseq != method_entry_iseqptr(me) &&
5011 !rb_obj_is_kind_of(recv, current_defined_class)) {
5012 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5013 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5014
5015 if (m) { /* not bound UnboundMethod */
5016 rb_raise(rb_eTypeError,
5017 "self has wrong type to call super in this context: "
5018 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5019 rb_obj_class(recv), m);
5020 }
5021 }
5022
5023 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5024 rb_raise(rb_eRuntimeError,
5025 "implicit argument passing of super from method defined"
5026 " by define_method() is not supported."
5027 " Specify all arguments explicitly.");
5028 }
5029
5030 ID mid = me->def->original_id;
5031
5032 if (!vm_ci_markable(cd->ci)) {
5033 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5034 }
5035 else {
5036 // update iseq. really? (TODO)
5037 cd->ci = vm_ci_new_runtime(mid,
5038 vm_ci_flag(cd->ci),
5039 vm_ci_argc(cd->ci),
5040 vm_ci_kwarg(cd->ci));
5041
5042 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5043 }
5044
5045 const struct rb_callcache *cc;
5046
5047 VALUE klass = vm_search_normal_superclass(me->defined_class);
5048
5049 if (!klass) {
5050 /* bound instance method of module */
5051 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5052 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5053 }
5054 else {
5055 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5056 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5057
5058 // define_method can cache for different method id
5059 if (cached_cme == NULL) {
5060 // empty_cc_for_super is not markable object
5061 cd->cc = empty_cc_for_super();
5062 }
5063 else if (cached_cme->called_id != mid) {
5064 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5065 if (cme) {
5066 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5067 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5068 }
5069 else {
5070 cd->cc = cc = empty_cc_for_super();
5071 }
5072 }
5073 else {
5074 switch (cached_cme->def->type) {
5075 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5076 case VM_METHOD_TYPE_REFINED:
5077 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5078 case VM_METHOD_TYPE_ATTRSET:
5079 case VM_METHOD_TYPE_IVAR:
5080 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5081 break;
5082 default:
5083 break; // use fastpath
5084 }
5085 }
5086 }
5087
5088 VM_ASSERT((vm_cc_cme(cc), true));
5089
5090 return cc;
5091}
5092
5093/* yield */
5094
5095static inline int
5096block_proc_is_lambda(const VALUE procval)
5097{
5098 rb_proc_t *proc;
5099
5100 if (procval) {
5101 GetProcPtr(procval, proc);
5102 return proc->is_lambda;
5103 }
5104 else {
5105 return 0;
5106 }
5107}
5108
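// Invoke an ifunc (C-implemented) block. is_lambda is currently fixed to
// FALSE, so the block receives its first argument (or nil) as `arg` while the
// full argc/argv are also passed through; an IFUNC frame is pushed around the
// call, flagged as BMETHOD when a method entry is supplied.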
5109static VALUE
5110vm_yield_with_cfunc(rb_execution_context_t *ec,
5111 const struct rb_captured_block *captured,
5112 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5113 const rb_callable_method_entry_t *me)
5114{
5115 int is_lambda = FALSE; /* TODO */
5116 VALUE val, arg, blockarg;
5117 int frame_flag;
5118 const struct vm_ifunc *ifunc = captured->code.ifunc;
5119
5120 if (is_lambda) {
5121 arg = rb_ary_new4(argc, argv);
5122 }
5123 else if (argc == 0) {
5124 arg = Qnil;
5125 }
5126 else {
5127 arg = argv[0];
5128 }
5129
5130 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5131
5132 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5133 if (kw_splat) {
5134 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5135 }
5136
5137 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5138 frame_flag,
5139 self,
5140 VM_GUARDED_PREV_EP(captured->ep),
5141 (VALUE)me,
5142 0, ec->cfp->sp, 0, 0);
5143 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5144 rb_vm_pop_frame(ec);
5145
5146 return val;
5147}
5148
5149VALUE
5150rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5151{
5152 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5153}
5154
5155static VALUE
5156vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5157{
5158 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5159}
5160
5161static inline int
5162vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5163{
5164 int i;
5165 long len = RARRAY_LEN(ary);
5166
5167 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5168
5169 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5170 argv[i] = RARRAY_AREF(ary, i);
5171 }
5172
5173 return i;
5174}
5175
5176static inline VALUE
5177vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5178{
5179 VALUE ary, arg0 = argv[0];
5180 ary = rb_check_array_type(arg0);
5181#if 0
5182 argv[0] = arg0;
5183#else
5184 VM_ASSERT(argv[0] == arg0);
5185#endif
5186 return ary;
5187}
5188
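// Prepare arguments for a simple iseq invoked as a block or lambda: a lone
// array argument may be splatted across the lead parameters, and for plain
// blocks arity mismatches are padded with nil or truncated instead of raising.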
5189static int
5190vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5191{
5192 if (rb_simple_iseq_p(iseq)) {
5193 rb_control_frame_t *cfp = ec->cfp;
5194 VALUE arg0;
5195
5196 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5197
5198 if (arg_setup_type == arg_setup_block &&
5199 calling->argc == 1 &&
5200 ISEQ_BODY(iseq)->param.flags.has_lead &&
5201 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5202 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5203 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5204 }
5205
5206 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5207 if (arg_setup_type == arg_setup_block) {
5208 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5209 int i;
5210 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5211 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5212 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5213 }
5214 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5215 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5216 }
5217 }
5218 else {
5219 argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5220 }
5221 }
5222
5223 return 0;
5224 }
5225 else {
5226 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5227 }
5228}
5229
5230static int
5231vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5232{
5233 struct rb_calling_info calling_entry, *calling;
5234
5235 calling = &calling_entry;
5236 calling->argc = argc;
5237 calling->block_handler = block_handler;
5238 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5239 calling->recv = Qundef;
5240 calling->heap_argv = 0;
5241 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5242
5243 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5244}
5245
5246/* ruby iseq -> ruby block */
5247
5248static VALUE
5249vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5250 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5251 bool is_lambda, VALUE block_handler)
5252{
5253 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5254 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5255 const int arg_size = ISEQ_BODY(iseq)->param.size;
5256 VALUE * const rsp = GET_SP() - calling->argc;
5257 VALUE * const argv = rsp;
5258 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5259
5260 SET_SP(rsp);
5261
5262 vm_push_frame(ec, iseq,
5263 VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
5264 captured->self,
5265 VM_GUARDED_PREV_EP(captured->ep), 0,
5266 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5267 rsp + arg_size,
5268 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5269
5270 return Qundef;
5271}
5272
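// Yield to a Symbol block (`&:sym`): the first argument becomes the receiver
// and the remaining arguments are sent to it via vm_call_symbol.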
5273static VALUE
5274vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5275 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5276 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5277{
5278 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5279 int flags = vm_ci_flag(ci);
5280
5281 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5282 ((calling->argc == 0) ||
5283 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5284 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5285 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5286 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5287 flags = 0;
5288 if (UNLIKELY(calling->heap_argv)) {
5289#if VM_ARGC_STACK_MAX < 0
5290 if (RARRAY_LEN(calling->heap_argv) < 1) {
5291 rb_raise(rb_eArgError, "no receiver given");
5292 }
5293#endif
5294 calling->recv = rb_ary_shift(calling->heap_argv);
5295 // Modify stack to avoid cfp consistency error
5296 reg_cfp->sp++;
5297 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5298 reg_cfp->sp[-2] = calling->recv;
5299 flags |= VM_CALL_ARGS_SPLAT;
5300 }
5301 else {
5302 if (calling->argc < 1) {
5303 rb_raise(rb_eArgError, "no receiver given");
5304 }
5305 calling->recv = TOPN(--calling->argc);
5306 }
5307 if (calling->kw_splat) {
5308 flags |= VM_CALL_KW_SPLAT;
5309 }
5310 }
5311 else {
5312 if (calling->argc < 1) {
5313 rb_raise(rb_eArgError, "no receiver given");
5314 }
5315 calling->recv = TOPN(--calling->argc);
5316 }
5317
5318 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5319}
5320
5321static VALUE
5322vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5323 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5324 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5325{
5326 VALUE val;
5327 int argc;
5328 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5329 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5330 argc = calling->argc;
5331 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5332 POPN(argc); /* TODO: should put before C/yield? */
5333 return val;
5334}
5335
5336static VALUE
5337vm_proc_to_block_handler(VALUE procval)
5338{
5339 const struct rb_block *block = vm_proc_block(procval);
5340
5341 switch (vm_block_type(block)) {
5342 case block_type_iseq:
5343 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5344 case block_type_ifunc:
5345 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5346 case block_type_symbol:
5347 return VM_BH_FROM_SYMBOL(block->as.symbol);
5348 case block_type_proc:
5349 return VM_BH_FROM_PROC(block->as.proc);
5350 }
5351 VM_UNREACHABLE(vm_yield_with_proc);
5352 return Qundef;
5353}
5354
5355static VALUE
5356vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5357 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5358 bool is_lambda, VALUE block_handler)
5359{
5360 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5361 VALUE proc = VM_BH_TO_PROC(block_handler);
5362 is_lambda = block_proc_is_lambda(proc);
5363 block_handler = vm_proc_to_block_handler(proc);
5364 }
5365
5366 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5367}
5368
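// Dispatch a yield according to the block handler type: iseq, ifunc, proc
// (unwrapped by vm_invoke_proc_block) or symbol.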
5369static inline VALUE
5370vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5371 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5372 bool is_lambda, VALUE block_handler)
5373{
5374 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5375 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5376 bool is_lambda, VALUE block_handler);
5377
5378 switch (vm_block_handler_type(block_handler)) {
5379 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5380 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5381 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5382 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5383 default: rb_bug("vm_invoke_block: unreachable");
5384 }
5385
5386 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5387}
5388
5389static VALUE
5390vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5391{
5392 const rb_execution_context_t *ec = GET_EC();
5393 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5394 struct rb_captured_block *captured;
5395
5396 if (cfp == 0) {
5397 rb_bug("vm_make_proc_with_iseq: unreachable");
5398 }
5399
5400 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5401 captured->code.iseq = blockiseq;
5402
5403 return rb_vm_make_proc(ec, captured, rb_cProc);
5404}
5405
5406static VALUE
5407vm_once_exec(VALUE iseq)
5408{
5409 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5410 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5411}
5412
5413static VALUE
5414vm_once_clear(VALUE data)
5415{
5416 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5417 is->once.running_thread = NULL;
5418 return Qnil;
5419}
5420
5421/* defined insn */
5422
5423static bool
5424check_respond_to_missing(VALUE obj, VALUE v)
5425{
5426 VALUE args[2];
5427 VALUE r;
5428
5429 args[0] = obj; args[1] = Qfalse;
5430 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5431 if (!UNDEF_P(r) && RTEST(r)) {
5432 return true;
5433 }
5434 else {
5435 return false;
5436 }
5437}
5438
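// Implements `defined?`: each operand type maps to the corresponding
// existence check; anything that does not match yields false (undefined).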
5439static bool
5440vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5441{
5442 VALUE klass;
5443 enum defined_type type = (enum defined_type)op_type;
5444
5445 switch (type) {
5446 case DEFINED_IVAR:
5447 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5448 break;
5449 case DEFINED_GVAR:
5450 return rb_gvar_defined(SYM2ID(obj));
5451 break;
5452 case DEFINED_CVAR: {
5453 const rb_cref_t *cref = vm_get_cref(GET_EP());
5454 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5455 return rb_cvar_defined(klass, SYM2ID(obj));
5456 break;
5457 }
5458 case DEFINED_CONST:
5459 case DEFINED_CONST_FROM: {
5460 bool allow_nil = type == DEFINED_CONST;
5461 klass = v;
5462 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5463 break;
5464 }
5465 case DEFINED_FUNC:
5466 klass = CLASS_OF(v);
5467 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5468 break;
5469 case DEFINED_METHOD:{
5470 VALUE klass = CLASS_OF(v);
5471 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5472
5473 if (me) {
5474 switch (METHOD_ENTRY_VISI(me)) {
5475 case METHOD_VISI_PRIVATE:
5476 break;
5477 case METHOD_VISI_PROTECTED:
5478 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5479 break;
5480 }
5481 case METHOD_VISI_PUBLIC:
5482 return true;
5483 break;
5484 default:
5485 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5486 }
5487 }
5488 else {
5489 return check_respond_to_missing(obj, v);
5490 }
5491 break;
5492 }
5493 case DEFINED_YIELD:
5494 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5495 return true;
5496 }
5497 break;
5498 case DEFINED_ZSUPER:
5499 {
5500 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5501
5502 if (me) {
5503 VALUE klass = vm_search_normal_superclass(me->defined_class);
5504 if (!klass) return false;
5505
5506 ID id = me->def->original_id;
5507
5508 return rb_method_boundp(klass, id, 0);
5509 }
5510 }
5511 break;
5512 case DEFINED_REF:
5513 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5514 default:
5515 rb_bug("unimplemented defined? type (VM)");
5516 break;
5517 }
5518
5519 return false;
5520}
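
/* For reference, the defined_type cases above correspond roughly to these
 * Ruby-level `defined?` forms:
 *
 *   defined?(@ivar)     # DEFINED_IVAR
 *   defined?($gvar)     # DEFINED_GVAR
 *   defined?(@@cvar)    # DEFINED_CVAR
 *   defined?(Const)     # DEFINED_CONST
 *   defined?(Foo::BAR)  # DEFINED_CONST_FROM
 *   defined?(func)      # DEFINED_FUNC
 *   defined?(x.meth)    # DEFINED_METHOD
 *   defined?(yield)     # DEFINED_YIELD
 *   defined?(super)     # DEFINED_ZSUPER
 *   defined?($1)        # DEFINED_REF
 */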
5521
5522bool
5523rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5524{
5525 return vm_defined(ec, reg_cfp, op_type, obj, v);
5526}
5527
5528static const VALUE *
5529vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5530{
5531 rb_num_t i;
5532 const VALUE *ep = reg_ep;
5533 for (i = 0; i < lv; i++) {
5534 ep = GET_PREV_EP(ep);
5535 }
5536 return ep;
5537}
5538
5539static VALUE
5540vm_get_special_object(const VALUE *const reg_ep,
5541 enum vm_special_object_type type)
5542{
5543 switch (type) {
5544 case VM_SPECIAL_OBJECT_VMCORE:
5545 return rb_mRubyVMFrozenCore;
5546 case VM_SPECIAL_OBJECT_CBASE:
5547 return vm_get_cbase(reg_ep);
5548 case VM_SPECIAL_OBJECT_CONST_BASE:
5549 return vm_get_const_base(reg_ep);
5550 default:
5551 rb_bug("putspecialobject insn: unknown value_type %d", type);
5552 }
5553}
5554
5555static VALUE
5556vm_concat_array(VALUE ary1, VALUE ary2st)
5557{
5558 const VALUE ary2 = ary2st;
5559 VALUE tmp1 = rb_check_to_array(ary1);
5560 VALUE tmp2 = rb_check_to_array(ary2);
5561
5562 if (NIL_P(tmp1)) {
5563 tmp1 = rb_ary_new3(1, ary1);
5564 }
5565 if (tmp1 == ary1) {
5566 tmp1 = rb_ary_dup(ary1);
5567 }
5568
5569 if (NIL_P(tmp2)) {
5570 return rb_ary_push(tmp1, ary2);
5571 } else {
5572 return rb_ary_concat(tmp1, tmp2);
5573 }
5574}
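
/* For example, the fallback-to-push behavior above gives splats their
 * usual semantics even for operands that do not respond to #to_a:
 *
 *   a = [1, 2]
 *   b = 3          # no #to_a, so it is pushed as a single element
 *   [*a, *b]       # => [1, 2, 3]
 */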
5575
5576static VALUE
5577vm_concat_to_array(VALUE ary1, VALUE ary2st)
5578{
5579 /* ary1 must be a newly created array */
5580 const VALUE ary2 = ary2st;
5581
5582 if (NIL_P(ary2)) return ary1;
5583
5584 VALUE tmp2 = rb_check_to_array(ary2);
5585
5586 if (NIL_P(tmp2)) {
5587 return rb_ary_push(ary1, ary2);
5588 } else {
5589 return rb_ary_concat(ary1, tmp2);
5590 }
5591}
5592
5593// The YJIT implementation uses this C function
5594// and needs a non-static function to call
5595VALUE
5596rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5597{
5598 return vm_concat_array(ary1, ary2st);
5599}
5600
5601VALUE
5602rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5603{
5604 return vm_concat_to_array(ary1, ary2st);
5605}
5606
5607static VALUE
5608vm_splat_array(VALUE flag, VALUE ary)
5609{
5610 if (NIL_P(ary)) {
5611 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5612 }
5613 VALUE tmp = rb_check_to_array(ary);
5614 if (NIL_P(tmp)) {
5615 return rb_ary_new3(1, ary);
5616 }
5617 else if (RTEST(flag)) {
5618 return rb_ary_dup(tmp);
5619 }
5620 else {
5621 return tmp;
5622 }
5623}
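
/* For example (`flag` requests a freshly allocated, mutable result):
 *
 *   [*nil]      # => []      (nil splats to an empty array)
 *   [*1]        # => [1]     (non-arrays without #to_a are wrapped)
 *   [*[1, 2]]   # => [1, 2]  (arrays pass through, dup'ed when flag is set)
 */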
5624
5625// The YJIT implementation uses this C function
5626// and needs a non-static function to call
5627VALUE
5628rb_vm_splat_array(VALUE flag, VALUE ary)
5629{
5630 return vm_splat_array(flag, ary);
5631}
5632
5633static VALUE
5634vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5635{
5636 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5637
5638 if (flag & VM_CHECKMATCH_ARRAY) {
5639 long i;
5640 const long n = RARRAY_LEN(pattern);
5641
5642 for (i = 0; i < n; i++) {
5643 VALUE v = RARRAY_AREF(pattern, i);
5644 VALUE c = check_match(ec, v, target, type);
5645
5646 if (RTEST(c)) {
5647 return c;
5648 }
5649 }
5650 return Qfalse;
5651 }
5652 else {
5653 return check_match(ec, pattern, target, type);
5654 }
5655}
5656
5657VALUE
5658rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5659{
5660 return vm_check_match(ec, target, pattern, flag);
5661}
5662
5663static VALUE
5664vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5665{
5666 const VALUE kw_bits = *(ep - bits);
5667
5668 if (FIXNUM_P(kw_bits)) {
5669 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5670 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5671 return Qfalse;
5672 }
5673 else {
5674 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5675 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5676 }
5677 return Qtrue;
5678}
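
/* For example, given `def m(a: 1, b: 2)`, the VM records which keywords
 * the caller supplied: for a small number of keywords kw_bits is a Fixnum
 * bitmask (bit i set == i-th keyword passed); beyond KW_SPECIFIED_BITS_MAX
 * it falls back to a Hash keyed by index. Qtrue ("unspecified") tells the
 * compiled prologue to evaluate that keyword's default-value expression.
 */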
5679
5680static void
5681vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5682{
5683 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5684 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5685 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5686 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5687
5688 switch (flag) {
5689 case RUBY_EVENT_CALL:
5690 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5691 return;
5692 case RUBY_EVENT_C_CALL:
5693 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5694 return;
5695 case RUBY_EVENT_RETURN:
5696 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5697 return;
5698      case RUBY_EVENT_C_RETURN:
5699        RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5700 return;
5701 }
5702 }
5703}
5704
5705static VALUE
5706vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5707{
5708 if (!rb_const_defined_at(cbase, id)) {
5709 return 0;
5710 }
5711 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5712 return rb_public_const_get_at(cbase, id);
5713 }
5714 else {
5715 return rb_const_get_at(cbase, id);
5716 }
5717}
5718
5719static VALUE
5720vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5721{
5722 if (!RB_TYPE_P(klass, T_CLASS)) {
5723 return 0;
5724 }
5725 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5726 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5727
5728 if (tmp != super) {
5729 rb_raise(rb_eTypeError,
5730 "superclass mismatch for class %"PRIsVALUE"",
5731 rb_id2str(id));
5732 }
5733 else {
5734 return klass;
5735 }
5736 }
5737 else {
5738 return klass;
5739 }
5740}
5741
5742static VALUE
5743vm_check_if_module(ID id, VALUE mod)
5744{
5745 if (!RB_TYPE_P(mod, T_MODULE)) {
5746 return 0;
5747 }
5748 else {
5749 return mod;
5750 }
5751}
5752
5753static VALUE
5754vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5755{
5756 /* new class declaration */
5757 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5758 VALUE c = rb_define_class_id(id, s);
5760 rb_set_class_path_string(c, cbase, rb_id2str(id));
5761 rb_const_set_raw(cbase, id, c);
5762 rb_class_inherited(s, c);
5763 rb_const_added(cbase, id);
5764 return c;
5765}
5766
5767static VALUE
5768vm_declare_module(ID id, VALUE cbase)
5769{
5770 VALUE m = rb_module_new();
5771 rb_set_class_path_string(m, cbase, rb_id2str(id));
5772 rb_const_set(cbase, id, m);
5773 return m;
5774}
5775
5776NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5777static void
5778unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5779{
5780 VALUE name = rb_id2str(id);
5781 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5782 name, type);
5783 VALUE location = rb_const_source_location_at(cbase, id);
5784 if (!NIL_P(location)) {
5785 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5786 " previous definition of %"PRIsVALUE" was here",
5787 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5788 }
5789    rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5790}
5791
5792static VALUE
5793vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5794{
5795 VALUE klass;
5796
5797 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5798 rb_raise(rb_eTypeError,
5799 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5800 rb_obj_class(super));
5801 }
5802
5803 vm_check_if_namespace(cbase);
5804
5805 /* find klass */
5806 rb_autoload_load(cbase, id);
5807 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5808 if (!vm_check_if_class(id, flags, super, klass))
5809 unmatched_redefinition("class", cbase, id, klass);
5810 return klass;
5811 }
5812 else {
5813 return vm_declare_class(id, flags, cbase, super);
5814 }
5815}
5816
5817static VALUE
5818vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5819{
5820 VALUE mod;
5821
5822 vm_check_if_namespace(cbase);
5823 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5824 if (!vm_check_if_module(id, mod))
5825 unmatched_redefinition("module", cbase, id, mod);
5826 return mod;
5827 }
5828 else {
5829 return vm_declare_module(id, cbase);
5830 }
5831}
5832
5833static VALUE
5834vm_find_or_create_class_by_id(ID id,
5835 rb_num_t flags,
5836 VALUE cbase,
5837 VALUE super)
5838{
5839 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5840
5841 switch (type) {
5842 case VM_DEFINECLASS_TYPE_CLASS:
5843 /* classdef returns class scope value */
5844 return vm_define_class(id, flags, cbase, super);
5845
5846 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5847 /* classdef returns class scope value */
5848 return rb_singleton_class(cbase);
5849
5850 case VM_DEFINECLASS_TYPE_MODULE:
5851 /* classdef returns class scope value */
5852 return vm_define_module(id, flags, cbase);
5853
5854 default:
5855 rb_bug("unknown defineclass type: %d", (int)type);
5856 }
5857}
5858
5859static rb_method_visibility_t
5860vm_scope_visibility_get(const rb_execution_context_t *ec)
5861{
5862 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5863
5864 if (!vm_env_cref_by_cref(cfp->ep)) {
5865 return METHOD_VISI_PUBLIC;
5866 }
5867 else {
5868 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5869 }
5870}
5871
5872static int
5873vm_scope_module_func_check(const rb_execution_context_t *ec)
5874{
5875 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5876
5877 if (!vm_env_cref_by_cref(cfp->ep)) {
5878 return FALSE;
5879 }
5880 else {
5881 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5882 }
5883}
5884
5885static void
5886vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5887{
5888 VALUE klass;
5889 rb_method_visibility_t visi;
5890 rb_cref_t *cref = vm_ec_cref(ec);
5891
5892 if (is_singleton) {
5893 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5894 visi = METHOD_VISI_PUBLIC;
5895 }
5896 else {
5897 klass = CREF_CLASS_FOR_DEFINITION(cref);
5898 visi = vm_scope_visibility_get(ec);
5899 }
5900
5901 if (NIL_P(klass)) {
5902 rb_raise(rb_eTypeError, "no class/module to add method");
5903 }
5904
5905 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5906    // Set max_iv_count on klasses based on the number of ivar sets in the initialize method
5907 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5908
5909 RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
5910 }
5911
5912 if (!is_singleton && vm_scope_module_func_check(ec)) {
5913 klass = rb_singleton_class(klass);
5914 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5915 }
5916}
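
/* For example, the module_func branch above is what makes a single def
 * define two methods at once:
 *
 *   module M
 *     module_function
 *     def helper; end   # private instance method plus public M.helper
 *   end
 */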
5917
5918static VALUE
5919vm_invokeblock_i(struct rb_execution_context_struct *ec,
5920 struct rb_control_frame_struct *reg_cfp,
5921 struct rb_calling_info *calling)
5922{
5923 const struct rb_callinfo *ci = calling->cd->ci;
5924 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5925
5926 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5927 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5928 }
5929 else {
5930 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5931 }
5932}
5933
5934enum method_explorer_type {
5935 mexp_search_method,
5936 mexp_search_invokeblock,
5937 mexp_search_super,
5938};
5939
5940static inline VALUE
5941vm_sendish(
5942 struct rb_execution_context_struct *ec,
5943 struct rb_control_frame_struct *reg_cfp,
5944 struct rb_call_data *cd,
5945 VALUE block_handler,
5946 enum method_explorer_type method_explorer
5947) {
5948 VALUE val = Qundef;
5949 const struct rb_callinfo *ci = cd->ci;
5950 const struct rb_callcache *cc;
5951 int argc = vm_ci_argc(ci);
5952 VALUE recv = TOPN(argc);
5953 struct rb_calling_info calling = {
5954 .block_handler = block_handler,
5955 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5956 .recv = recv,
5957 .argc = argc,
5958 .cd = cd,
5959 };
5960
5961 switch (method_explorer) {
5962 case mexp_search_method:
5963 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5964 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5965 break;
5966 case mexp_search_super:
5967 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5968 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5969 break;
5970 case mexp_search_invokeblock:
5971 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5972 break;
5973 }
5974 return val;
5975}
5976
5977VALUE
5978rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5979{
5980 stack_check(ec);
5981
5982 struct rb_forwarding_call_data adjusted_cd;
5983 struct rb_callinfo adjusted_ci;
5984
5985 VALUE bh;
5986 VALUE val;
5987
5988 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5989 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5990
5991 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5992
5993 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
5994 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
5995 }
5996 }
5997 else {
5998 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
5999 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6000 }
6001
6002 VM_EXEC(ec, val);
6003 return val;
6004}
6005
6006VALUE
6007rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6008{
6009 stack_check(ec);
6010 VALUE bh = VM_BLOCK_HANDLER_NONE;
6011 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6012 VM_EXEC(ec, val);
6013 return val;
6014}
6015
6016VALUE
6017rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6018{
6019 stack_check(ec);
6020 struct rb_forwarding_call_data adjusted_cd;
6021 struct rb_callinfo adjusted_ci;
6022
6023 VALUE bh;
6024 VALUE val;
6025
6026 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6027 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6028
6029 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6030
6031 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6032 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6033 }
6034 }
6035 else {
6036 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6037 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6038 }
6039
6040 VM_EXEC(ec, val);
6041 return val;
6042}
6043
6044VALUE
6045rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6046{
6047 stack_check(ec);
6048 VALUE bh = VM_BLOCK_HANDLER_NONE;
6049 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6050 VM_EXEC(ec, val);
6051 return val;
6052}
6053
6054/* object.c */
6055VALUE rb_nil_to_s(VALUE);
6056VALUE rb_true_to_s(VALUE);
6057VALUE rb_false_to_s(VALUE);
6058/* numeric.c */
6059VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6060VALUE rb_fix_to_s(VALUE);
6061/* variable.c */
6062VALUE rb_mod_to_s(VALUE);
6064
6065static VALUE
6066vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6067{
6068 int type = TYPE(recv);
6069 if (type == T_STRING) {
6070 return recv;
6071 }
6072
6073 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6074
6075 switch (type) {
6076 case T_SYMBOL:
6077 if (check_method_basic_definition(vm_cc_cme(cc))) {
6078 // rb_sym_to_s() allocates a mutable string, but since we are only
6079 // going to use this string for interpolation, it's fine to use the
6080 // frozen string.
6081 return rb_sym2str(recv);
6082 }
6083 break;
6084 case T_MODULE:
6085 case T_CLASS:
6086 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6087 // rb_mod_to_s() allocates a mutable string, but since we are only
6088 // going to use this string for interpolation, it's fine to use the
6089 // frozen string.
6090 VALUE val = rb_mod_name(recv);
6091 if (NIL_P(val)) {
6092 val = rb_mod_to_s(recv);
6093 }
6094 return val;
6095 }
6096 break;
6097 case T_NIL:
6098 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6099 return rb_nil_to_s(recv);
6100 }
6101 break;
6102 case T_TRUE:
6103 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6104 return rb_true_to_s(recv);
6105 }
6106 break;
6107 case T_FALSE:
6108 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6109 return rb_false_to_s(recv);
6110 }
6111 break;
6112 case T_FIXNUM:
6113 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6114 return rb_fix_to_s(recv);
6115 }
6116 break;
6117 }
6118 return Qundef;
6119}
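
/* For example, this backs the `objtostring` instruction emitted for string
 * interpolation; with the default to_s implementations,
 *
 *   "#{:sym} #{nil} #{42}"   # => "sym  42"
 *
 * is built via the fast paths above, without dispatching #to_s.
 */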
6120
6121static VALUE
6122vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6123{
6124 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6125 return ary;
6126 }
6127 else {
6128 return Qundef;
6129 }
6130}
6131
6132static VALUE
6133vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6134{
6135 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6136 return hash;
6137 }
6138 else {
6139 return Qundef;
6140 }
6141}
6142
6143static VALUE
6144vm_opt_str_freeze(VALUE str, int bop, ID id)
6145{
6146 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6147 return str;
6148 }
6149 else {
6150 return Qundef;
6151 }
6152}
6153
6154/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6155#define id_cmp idCmp
6156
6157static VALUE
6158vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6159{
6160 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6161 return rb_ary_includes(ary, target);
6162 }
6163 else {
6164 VALUE args[1] = {target};
6165
6166 // duparray
6167 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6168 VALUE dupary = rb_ary_resurrect(ary);
6169
6170 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6171 }
6172}
6173
6174VALUE
6175rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6176{
6177 return vm_opt_duparray_include_p(ec, ary, target);
6178}
6179
6180static VALUE
6181vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6182{
6183 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6184 if (num == 0) {
6185 return Qnil;
6186 }
6187 else {
6188 VALUE result = *ptr;
6189 rb_snum_t i = num - 1;
6190 while (i-- > 0) {
6191 const VALUE v = *++ptr;
6192 if (OPTIMIZED_CMP(v, result) > 0) {
6193 result = v;
6194 }
6195 }
6196 return result;
6197 }
6198 }
6199 else {
6200 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6201 }
6202}
6203
6204VALUE
6205rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6206{
6207 return vm_opt_newarray_max(ec, num, ptr);
6208}
6209
6210static VALUE
6211vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6212{
6213 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6214 if (num == 0) {
6215 return Qnil;
6216 }
6217 else {
6218 VALUE result = *ptr;
6219 rb_snum_t i = num - 1;
6220 while (i-- > 0) {
6221 const VALUE v = *++ptr;
6222 if (OPTIMIZED_CMP(v, result) < 0) {
6223 result = v;
6224 }
6225 }
6226 return result;
6227 }
6228 }
6229 else {
6230 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6231 }
6232}
6233
6234VALUE
6235rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6236{
6237 return vm_opt_newarray_min(ec, num, ptr);
6238}
6239
6240static VALUE
6241vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6242{
6243 // If Array#hash is _not_ monkeypatched, use the optimized call
6244 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6245 return rb_ary_hash_values(num, ptr);
6246 }
6247 else {
6248 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6249 }
6250}
6251
6252VALUE
6253rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6254{
6255 return vm_opt_newarray_hash(ec, num, ptr);
6256}
6257
6258VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6259VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6260
6261static VALUE
6262vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6263{
6264 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6265 struct RArray fake_ary;
6266 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6267 return rb_ary_includes(ary, target);
6268 }
6269 else {
6270 VALUE args[1] = {target};
6271 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6272 }
6273}
6274
6275VALUE
6276rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6277{
6278 return vm_opt_newarray_include_p(ec, num, ptr, target);
6279}
6280
6281static VALUE
6282vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6283{
6284 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6285 struct RArray fake_ary;
6286 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6287 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6288 }
6289 else {
6290 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6291        // Set up an array with room for the keyword hash.
6292 VALUE args[2];
6293 args[0] = fmt;
6294 int kw_splat = RB_NO_KEYWORDS;
6295 int argc = 1;
6296
6297 if (!UNDEF_P(buffer)) {
6298 args[1] = rb_hash_new_with_size(1);
6299 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6300 kw_splat = RB_PASS_KEYWORDS;
6301 argc++;
6302 }
6303
6304 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6305 }
6306}
6307
6308VALUE
6309rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6310{
6311 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6312}
6313
6314VALUE
6315rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6316{
6317 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6318}
6319
6320#undef id_cmp
6321
6322static void
6323vm_track_constant_cache(ID id, void *ic)
6324{
6325 rb_vm_t *vm = GET_VM();
6326 struct rb_id_table *const_cache = vm->constant_cache;
6327 VALUE lookup_result;
6328 st_table *ics;
6329
6330 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6331 ics = (st_table *)lookup_result;
6332 }
6333 else {
6334 ics = st_init_numtable();
6335 rb_id_table_insert(const_cache, id, (VALUE)ics);
6336 }
6337
6338 /* The call below to st_insert could allocate which could trigger a GC.
6339 * If it triggers a GC, it may free an iseq that also holds a cache to this
6340 * constant. If that iseq is the last iseq with a cache to this constant, then
6341     * it will free this ST table, which would cause a use-after-free during this
6342 * st_insert.
6343 *
6344 * So to fix this issue, we store the ID that is currently being inserted
6345 * and, in remove_from_constant_cache, we don't free the ST table for ID
6346 * equal to this one.
6347 *
6348 * See [Bug #20921].
6349 */
6350 vm->inserting_constant_cache_id = id;
6351
6352 st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
6353
6354 vm->inserting_constant_cache_id = (ID)0;
6355}
6356
6357static void
6358vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6359{
6360 RB_VM_LOCK_ENTER();
6361
6362 for (int i = 0; segments[i]; i++) {
6363 ID id = segments[i];
6364 if (id == idNULL) continue;
6365 vm_track_constant_cache(id, ic);
6366 }
6367
6368 RB_VM_LOCK_LEAVE();
6369}
6370
6371// For JIT inlining
6372static inline bool
6373vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6374{
6375 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6376 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6377
6378 return (ic_cref == NULL || // no need to check CREF
6379 ic_cref == vm_get_cref(reg_ep));
6380 }
6381 return false;
6382}
6383
6384static bool
6385vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6386{
6387 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6388 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6389}
6390
6391// YJIT needs this function to never allocate and never raise
6392bool
6393rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6394{
6395 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6396}
6397
6398static void
6399vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6400{
6401 if (ruby_vm_const_missing_count > 0) {
6402 ruby_vm_const_missing_count = 0;
6403 ic->entry = NULL;
6404 return;
6405 }
6406
6407 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6408 RB_OBJ_WRITE(ice, &ice->value, val);
6409 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6410 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6411 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6412
6413 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6414 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6415 rb_yjit_constant_ic_update(iseq, ic, pos);
6416}
6417
6418VALUE
6419rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6420{
6421 VALUE val;
6422 const ID *segments = ic->segments;
6423 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6424 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6425 val = ice->value;
6426
6427 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6428 }
6429 else {
6430 ruby_vm_constant_cache_misses++;
6431 val = vm_get_ev_const_chain(ec, segments);
6432 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6433 // Undo the PC increment to get the address to this instruction
6434 // INSN_ATTR(width) == 2
6435 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6436 }
6437 return val;
6438}
6439
6440static VALUE
6441vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6442{
6443 rb_thread_t *th = rb_ec_thread_ptr(ec);
6444 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6445
6446 again:
6447 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6448 return is->once.value;
6449 }
6450 else if (is->once.running_thread == NULL) {
6451 VALUE val;
6452 is->once.running_thread = th;
6453 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6454 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6455 /* is->once.running_thread is cleared by vm_once_clear() */
6456 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6457 return val;
6458 }
6459 else if (is->once.running_thread == th) {
6460 /* recursive once */
6461 return vm_once_exec((VALUE)iseq);
6462 }
6463 else {
6464 /* waiting for finish */
6465 RUBY_VM_CHECK_INTS(ec);
6466        rb_thread_schedule();
6467        goto again;
6468 }
6469}
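
/* For example, the `once` instruction dispatched here implements /o
 * regexps, whose interpolation must run exactly once per call site:
 *
 *   def f(x)
 *     /#{x}/o   # built on the first call, cached thereafter
 *   end
 */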
6470
6471static OFFSET
6472vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6473{
6474 switch (OBJ_BUILTIN_TYPE(key)) {
6475 case -1:
6476 case T_FLOAT:
6477 case T_SYMBOL:
6478 case T_BIGNUM:
6479 case T_STRING:
6480 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6481 SYMBOL_REDEFINED_OP_FLAG |
6482 INTEGER_REDEFINED_OP_FLAG |
6483 FLOAT_REDEFINED_OP_FLAG |
6484 NIL_REDEFINED_OP_FLAG |
6485 TRUE_REDEFINED_OP_FLAG |
6486 FALSE_REDEFINED_OP_FLAG |
6487 STRING_REDEFINED_OP_FLAG)) {
6488 st_data_t val;
6489 if (RB_FLOAT_TYPE_P(key)) {
6490 double kval = RFLOAT_VALUE(key);
6491 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6492 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6493 }
6494 }
6495 if (rb_hash_stlike_lookup(hash, key, &val)) {
6496 return FIX2LONG((VALUE)val);
6497 }
6498 else {
6499 return else_offset;
6500 }
6501 }
6502 }
6503 return 0;
6504}
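
/* For example, the Float normalization above lets an integral Float hit
 * the Integer key stored in the CDHASH:
 *
 *   case 1.0
 *   when 1 then :hit   # found via the hash lookup, since 1 == 1.0
 *   end
 */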
6505
6506NORETURN(static void
6507 vm_stack_consistency_error(const rb_execution_context_t *ec,
6508 const rb_control_frame_t *,
6509 const VALUE *));
6510static void
6511vm_stack_consistency_error(const rb_execution_context_t *ec,
6512 const rb_control_frame_t *cfp,
6513 const VALUE *bp)
6514{
6515 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6516 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6517 static const char stack_consistency_error[] =
6518 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6519#if defined RUBY_DEVEL
6520 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6521 rb_str_cat_cstr(mesg, "\n");
6522 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6523    rb_bug("%s", StringValueCStr(mesg));
6524#else
6525 rb_bug(stack_consistency_error, nsp, nbp);
6526#endif
6527}
6528
6529static VALUE
6530vm_opt_plus(VALUE recv, VALUE obj)
6531{
6532 if (FIXNUM_2_P(recv, obj) &&
6533 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6534 return rb_fix_plus_fix(recv, obj);
6535 }
6536 else if (FLONUM_2_P(recv, obj) &&
6537 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6538 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6539 }
6540 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6541 return Qundef;
6542 }
6543 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6544 RBASIC_CLASS(obj) == rb_cFloat &&
6545 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6546 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6547 }
6548 else if (RBASIC_CLASS(recv) == rb_cString &&
6549 RBASIC_CLASS(obj) == rb_cString &&
6550 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6551 return rb_str_opt_plus(recv, obj);
6552 }
6553 else if (RBASIC_CLASS(recv) == rb_cArray &&
6554 RBASIC_CLASS(obj) == rb_cArray &&
6555 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6556 return rb_ary_plus(recv, obj);
6557 }
6558 else {
6559 return Qundef;
6560 }
6561}
6562
6563static VALUE
6564vm_opt_minus(VALUE recv, VALUE obj)
6565{
6566 if (FIXNUM_2_P(recv, obj) &&
6567 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6568 return rb_fix_minus_fix(recv, obj);
6569 }
6570 else if (FLONUM_2_P(recv, obj) &&
6571 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6572 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6573 }
6574 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6575 return Qundef;
6576 }
6577 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6578 RBASIC_CLASS(obj) == rb_cFloat &&
6579 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6580 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6581 }
6582 else {
6583 return Qundef;
6584 }
6585}
6586
6587static VALUE
6588vm_opt_mult(VALUE recv, VALUE obj)
6589{
6590 if (FIXNUM_2_P(recv, obj) &&
6591 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6592 return rb_fix_mul_fix(recv, obj);
6593 }
6594 else if (FLONUM_2_P(recv, obj) &&
6595 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6596 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6597 }
6598 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6599 return Qundef;
6600 }
6601 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6602 RBASIC_CLASS(obj) == rb_cFloat &&
6603 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6604 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6605 }
6606 else {
6607 return Qundef;
6608 }
6609}
6610
6611static VALUE
6612vm_opt_div(VALUE recv, VALUE obj)
6613{
6614 if (FIXNUM_2_P(recv, obj) &&
6615 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6616 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6617 }
6618 else if (FLONUM_2_P(recv, obj) &&
6619 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6620 return rb_flo_div_flo(recv, obj);
6621 }
6622 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6623 return Qundef;
6624 }
6625 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6626 RBASIC_CLASS(obj) == rb_cFloat &&
6627 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6628 return rb_flo_div_flo(recv, obj);
6629 }
6630 else {
6631 return Qundef;
6632 }
6633}
6634
6635static VALUE
6636vm_opt_mod(VALUE recv, VALUE obj)
6637{
6638 if (FIXNUM_2_P(recv, obj) &&
6639 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6640 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6641 }
6642 else if (FLONUM_2_P(recv, obj) &&
6643 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6644 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6645 }
6646 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6647 return Qundef;
6648 }
6649 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6650 RBASIC_CLASS(obj) == rb_cFloat &&
6651 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6652 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6653 }
6654 else {
6655 return Qundef;
6656 }
6657}
6658
6659static VALUE
6660vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6661{
6662 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6663 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6664
6665 if (!UNDEF_P(val)) {
6666 return RBOOL(!RTEST(val));
6667 }
6668 }
6669
6670 return Qundef;
6671}
6672
6673static VALUE
6674vm_opt_lt(VALUE recv, VALUE obj)
6675{
6676 if (FIXNUM_2_P(recv, obj) &&
6677 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6678 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6679 }
6680 else if (FLONUM_2_P(recv, obj) &&
6681 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6682 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6683 }
6684 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6685 return Qundef;
6686 }
6687 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6688 RBASIC_CLASS(obj) == rb_cFloat &&
6689 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6690 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6691 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6692 }
6693 else {
6694 return Qundef;
6695 }
6696}
6697
6698static VALUE
6699vm_opt_le(VALUE recv, VALUE obj)
6700{
6701 if (FIXNUM_2_P(recv, obj) &&
6702 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6703 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6704 }
6705 else if (FLONUM_2_P(recv, obj) &&
6706 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6707 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6708 }
6709 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6710 return Qundef;
6711 }
6712 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6713 RBASIC_CLASS(obj) == rb_cFloat &&
6714 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6715 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6716 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6717 }
6718 else {
6719 return Qundef;
6720 }
6721}
6722
6723static VALUE
6724vm_opt_gt(VALUE recv, VALUE obj)
6725{
6726 if (FIXNUM_2_P(recv, obj) &&
6727 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6728 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6729 }
6730 else if (FLONUM_2_P(recv, obj) &&
6731 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6732 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6733 }
6734 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6735 return Qundef;
6736 }
6737 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6738 RBASIC_CLASS(obj) == rb_cFloat &&
6739 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6740 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6741 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6742 }
6743 else {
6744 return Qundef;
6745 }
6746}
6747
6748static VALUE
6749vm_opt_ge(VALUE recv, VALUE obj)
6750{
6751 if (FIXNUM_2_P(recv, obj) &&
6752 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6753 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6754 }
6755 else if (FLONUM_2_P(recv, obj) &&
6756 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6757 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6758 }
6759 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6760 return Qundef;
6761 }
6762 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6763 RBASIC_CLASS(obj) == rb_cFloat &&
6764 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6765 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6766 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6767 }
6768 else {
6769 return Qundef;
6770 }
6771}
6772
6773
6774static VALUE
6775vm_opt_ltlt(VALUE recv, VALUE obj)
6776{
6777 if (SPECIAL_CONST_P(recv)) {
6778 return Qundef;
6779 }
6780 else if (RBASIC_CLASS(recv) == rb_cString &&
6781 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6782 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6783 return rb_str_buf_append(recv, obj);
6784 }
6785 else {
6786 return rb_str_concat(recv, obj);
6787 }
6788 }
6789 else if (RBASIC_CLASS(recv) == rb_cArray &&
6790 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6791 return rb_ary_push(recv, obj);
6792 }
6793 else {
6794 return Qundef;
6795 }
6796}
6797
6798static VALUE
6799vm_opt_and(VALUE recv, VALUE obj)
6800{
6801 // If recv and obj are both fixnums, then the bottom tag bit
6802 // will be 1 on both. 1 & 1 == 1, so the result value will also
6803 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6804 // will be 0, and we return Qundef.
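    // Worked example: INT2FIX(3) == 0x07 and INT2FIX(5) == 0x0b, so
    // 0x07 & 0x0b == 0x03 == INT2FIX(1), matching 3 & 5 == 1.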
6805 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6806
6807 if (FIXNUM_P(ret) &&
6808 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6809 return ret;
6810 }
6811 else {
6812 return Qundef;
6813 }
6814}
6815
6816static VALUE
6817vm_opt_or(VALUE recv, VALUE obj)
6818{
6819 if (FIXNUM_2_P(recv, obj) &&
6820 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6821 return recv | obj;
6822 }
6823 else {
6824 return Qundef;
6825 }
6826}
6827
6828static VALUE
6829vm_opt_aref(VALUE recv, VALUE obj)
6830{
6831 if (SPECIAL_CONST_P(recv)) {
6832 if (FIXNUM_2_P(recv, obj) &&
6833 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6834 return rb_fix_aref(recv, obj);
6835 }
6836 return Qundef;
6837 }
6838 else if (RBASIC_CLASS(recv) == rb_cArray &&
6839 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6840 if (FIXNUM_P(obj)) {
6841 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6842 }
6843 else {
6844 return rb_ary_aref1(recv, obj);
6845 }
6846 }
6847 else if (RBASIC_CLASS(recv) == rb_cHash &&
6848 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6849 return rb_hash_aref(recv, obj);
6850 }
6851 else {
6852 return Qundef;
6853 }
6854}
6855
6856static VALUE
6857vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6858{
6859 if (SPECIAL_CONST_P(recv)) {
6860 return Qundef;
6861 }
6862 else if (RBASIC_CLASS(recv) == rb_cArray &&
6863 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6864 FIXNUM_P(obj)) {
6865 rb_ary_store(recv, FIX2LONG(obj), set);
6866 return set;
6867 }
6868 else if (RBASIC_CLASS(recv) == rb_cHash &&
6869 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6870 rb_hash_aset(recv, obj, set);
6871 return set;
6872 }
6873 else {
6874 return Qundef;
6875 }
6876}
6877
6878static VALUE
6879vm_opt_aref_with(VALUE recv, VALUE key)
6880{
6881 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6882 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6883 rb_hash_compare_by_id_p(recv) == Qfalse &&
6884 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6885 return rb_hash_aref(recv, key);
6886 }
6887 else {
6888 return Qundef;
6889 }
6890}
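
/* This serves `hash["string literal"]` so the frozen key string need not
 * be duplicated per lookup; note the guards above: Hash#[] unredefined,
 * not compare_by_identity, and no default proc (which would receive the
 * key object). */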
6891
6892VALUE
6893rb_vm_opt_aref_with(VALUE recv, VALUE key)
6894{
6895 return vm_opt_aref_with(recv, key);
6896}
6897
6898static VALUE
6899vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6900{
6901 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6902 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6903 rb_hash_compare_by_id_p(recv) == Qfalse) {
6904 return rb_hash_aset(recv, key, val);
6905 }
6906 else {
6907 return Qundef;
6908 }
6909}
6910
6911static VALUE
6912vm_opt_length(VALUE recv, int bop)
6913{
6914 if (SPECIAL_CONST_P(recv)) {
6915 return Qundef;
6916 }
6917 else if (RBASIC_CLASS(recv) == rb_cString &&
6918 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6919 if (bop == BOP_EMPTY_P) {
6920 return LONG2NUM(RSTRING_LEN(recv));
6921 }
6922 else {
6923 return rb_str_length(recv);
6924 }
6925 }
6926 else if (RBASIC_CLASS(recv) == rb_cArray &&
6927 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6928 return LONG2NUM(RARRAY_LEN(recv));
6929 }
6930 else if (RBASIC_CLASS(recv) == rb_cHash &&
6931 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6932 return INT2FIX(RHASH_SIZE(recv));
6933 }
6934 else {
6935 return Qundef;
6936 }
6937}
6938
6939static VALUE
6940vm_opt_empty_p(VALUE recv)
6941{
6942 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6943 case Qundef: return Qundef;
6944 case INT2FIX(0): return Qtrue;
6945 default: return Qfalse;
6946 }
6947}
6948
6949VALUE rb_false(VALUE obj);
6950
6951static VALUE
6952vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6953{
6954 if (NIL_P(recv) &&
6955 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6956 return Qtrue;
6957 }
6958 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6959 return Qfalse;
6960 }
6961 else {
6962 return Qundef;
6963 }
6964}
6965
6966static VALUE
6967fix_succ(VALUE x)
6968{
6969 switch (x) {
6970 case ~0UL:
6971 /* 0xFFFF_FFFF == INT2FIX(-1)
6972 * `-1.succ` is of course 0. */
6973 return INT2FIX(0);
6974 case RSHIFT(~0UL, 1):
6975 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6976 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6977 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6978 default:
6979 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6980 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6981 * == lx*2 + ly*2 + 1
6982 * == (lx*2+1) + (ly*2+1) - 1
6983 * == x + y - 1
6984 *
6985 * Here, if we put y := INT2FIX(1):
6986 *
6987 * == x + INT2FIX(1) - 1
6988 * == x + 2 .
6989 */
6990 return x + 2;
6991 }
6992}
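
/* Worked check of the default branch: INT2FIX(5) == 11 and 11 + 2 == 13
 * == INT2FIX(6), i.e. 5.succ == 6 without unboxing. */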
6993
6994static VALUE
6995vm_opt_succ(VALUE recv)
6996{
6997 if (FIXNUM_P(recv) &&
6998 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
6999 return fix_succ(recv);
7000 }
7001 else if (SPECIAL_CONST_P(recv)) {
7002 return Qundef;
7003 }
7004 else if (RBASIC_CLASS(recv) == rb_cString &&
7005 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7006 return rb_str_succ(recv);
7007 }
7008 else {
7009 return Qundef;
7010 }
7011}
7012
7013static VALUE
7014vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7015{
7016 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7017 return RBOOL(!RTEST(recv));
7018 }
7019 else {
7020 return Qundef;
7021 }
7022}
7023
7024static VALUE
7025vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7026{
7027 if (SPECIAL_CONST_P(recv)) {
7028 return Qundef;
7029 }
7030 else if (RBASIC_CLASS(recv) == rb_cString &&
7031 CLASS_OF(obj) == rb_cRegexp &&
7032 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7033 return rb_reg_match(obj, recv);
7034 }
7035 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7036 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7037 return rb_reg_match(recv, obj);
7038 }
7039 else {
7040 return Qundef;
7041 }
7042}
7043
7044rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7045
7046NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7047
7048static inline void
7049vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7050 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7051 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7052{
7053 rb_event_flag_t event = pc_events & target_event;
7054 VALUE self = GET_SELF();
7055
7056 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7057
7058 if (event & global_hooks->events) {
7059 /* increment PC because source line is calculated with PC-1 */
7060 reg_cfp->pc++;
7061 vm_dtrace(event, ec);
7062        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
7063 reg_cfp->pc--;
7064 }
7065
7066 // Load here since global hook above can add and free local hooks
7067 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7068 if (local_hooks != NULL) {
7069 if (event & local_hooks->events) {
7070 /* increment PC because source line is calculated with PC-1 */
7071 reg_cfp->pc++;
7072            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
7073 reg_cfp->pc--;
7074 }
7075 }
7076}
7077
7078#define VM_TRACE_HOOK(target_event, val) do { \
7079 if ((pc_events & (target_event)) & enabled_flags) { \
7080 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7081 } \
7082} while (0)
7083
7084static VALUE
7085rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7086{
7087 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7088 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7089 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7090}
7091
7092static void
7093vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7094{
7095 const VALUE *pc = reg_cfp->pc;
7096 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7097 rb_event_flag_t global_events = enabled_flags;
7098
7099 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7100 return;
7101 }
7102 else {
7103 const rb_iseq_t *iseq = reg_cfp->iseq;
7104 VALUE iseq_val = (VALUE)iseq;
7105 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7106 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7107 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7108 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7109 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7110 rb_hook_list_t *bmethod_local_hooks = NULL;
7111 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7112 rb_event_flag_t bmethod_local_events = 0;
7113 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7114 enabled_flags |= iseq_local_events;
7115
7116 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7117
7118 if (bmethod_frame) {
7119 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7120 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7121 bmethod_local_hooks = me->def->body.bmethod.hooks;
7122 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7123 if (bmethod_local_hooks) {
7124 bmethod_local_events = bmethod_local_hooks->events;
7125 }
7126 }
7127
7128
7129 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7130#if 0
7131 /* disable trace */
7132 /* TODO: incomplete */
7133 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7134#else
7135 /* do not disable trace because of performance problem
7136 * (re-enable overhead)
7137 */
7138#endif
7139 return;
7140 }
7141 else if (ec->trace_arg != NULL) {
7142 /* already tracing */
7143 return;
7144 }
7145 else {
7146 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7147 /* Note, not considering iseq local events here since the same
7148 * iseq could be used in multiple bmethods. */
7149 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7150
7151 if (0) {
7152 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7153 (int)pos,
7154 (int)pc_events,
7155 RSTRING_PTR(rb_iseq_path(iseq)),
7156 (int)rb_iseq_line_no(iseq, pos),
7157 RSTRING_PTR(rb_iseq_label(iseq)));
7158 }
7159 VM_ASSERT(reg_cfp->pc == pc);
7160 VM_ASSERT(pc_events != 0);
7161
7162 /* check traces */
7163 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7164 /* b_call instruction running as a method. Fire call event. */
7165 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7166 }
7167            VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7168            VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7169 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7170 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7171 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7172 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7173 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7174 /* b_return instruction running as a method. Fire return event. */
7175 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7176 }
7177
7178 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7179 // We need the pointer to stay valid in case compaction happens in a trace hook.
7180 //
7181 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7182 // storage for `rb_method_definition_t` is not on the GC heap.
7183 RB_GC_GUARD(iseq_val);
7184 }
7185 }
7186}
7187#undef VM_TRACE_HOOK
7188
7189#if VM_CHECK_MODE > 0
7190NORETURN( NOINLINE( COLDFUNC
7191void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7192
7193void
7194Init_vm_stack_canary(void)
7195{
7196 /* This has to be called _after_ our PRNG is properly set up. */
7197 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7198 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7199
7200 vm_stack_canary_was_born = true;
7201 VM_ASSERT(n == 0);
7202}
7203
7204void
7205rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7206{
7207 /* Because a method has already been called, why not call
7208 * another one. */
7209 const char *insn = rb_insns_name(i);
7210 VALUE inspection = rb_inspect(c);
7211 const char *str = StringValueCStr(inspection);
7212
7213 rb_bug("dead canary found at %s: %s", insn, str);
7214}
7215
7216#else
7217void Init_vm_stack_canary(void) { /* nothing to do */ }
7218#endif
7219
7220
7221/* a part of the following code is generated by this ruby script:
7222
722316.times{|i|
7224 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7225 typedef_args.prepend(", ") if i != 0
7226 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7227 call_args.prepend(", ") if i != 0
7228 puts %Q{
7229static VALUE
7230builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7231{
7232 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7233 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7234}}
7235}
7236
7237puts
7238puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
723916.times{|i|
7240 puts " builtin_invoker#{i},"
7241}
7242puts "};"
7243*/
7244
7245static VALUE
7246builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7247{
7248 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7249 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7250}
7251
7252static VALUE
7253builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7254{
7255 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7256 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7257}
7258
7259static VALUE
7260builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7261{
7262 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7263 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7264}
7265
7266static VALUE
7267builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7268{
7269 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7270 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7271}
7272
7273static VALUE
7274builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7275{
7276 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7277 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7278}
7279
7280static VALUE
7281builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7282{
7283 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7284 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7285}
7286
7287static VALUE
7288builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7289{
7290 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7291 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7292}
7293
7294static VALUE
7295builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7296{
7297 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7298 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7299}
7300
7301static VALUE
7302builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7303{
7304 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7305 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7306}
7307
7308static VALUE
7309builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7310{
7311 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7312 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7313}
7314
7315static VALUE
7316builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7317{
7318 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7319 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7320}
7321
7322static VALUE
7323builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7324{
7325 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7326 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7327}
7328
7329static VALUE
7330builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7331{
7332 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7333 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7334}
7335
7336static VALUE
7337builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7338{
7339 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7340 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7341}
7342
7343static VALUE
7344builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7345{
7346 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7347 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7348}
7349
7350static VALUE
7351builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7352{
7353 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7354 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7355}
7356
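/*
 * builtin_invoker0..builtin_invoker15 above all follow the same pattern:
 * cast the type-erased funcptr to the matching N-ary signature and spread
 * argv into positional arguments, so a builtin function can be entered as a
 * plain C call with a fixed arity.
 */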
7357typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7358
7359static builtin_invoker
7360lookup_builtin_invoker(int argc)
7361{
7362 static const builtin_invoker invokers[] = {
7363 builtin_invoker0,
7364 builtin_invoker1,
7365 builtin_invoker2,
7366 builtin_invoker3,
7367 builtin_invoker4,
7368 builtin_invoker5,
7369 builtin_invoker6,
7370 builtin_invoker7,
7371 builtin_invoker8,
7372 builtin_invoker9,
7373 builtin_invoker10,
7374 builtin_invoker11,
7375 builtin_invoker12,
7376 builtin_invoker13,
7377 builtin_invoker14,
7378 builtin_invoker15,
7379 };
7380
7381 return invokers[argc];
7382}
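/*
 * Illustrative sketch (not part of the original file): a builtin with
 * argc == 2 is expected to have the signature
 *
 *     VALUE some_builtin(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b);
 *
 * and lookup_builtin_invoker(2) returns builtin_invoker2, which spreads the
 * two argv slots into `a` and `b`. argc is assumed to be in 0..15; the table
 * lookup itself performs no bounds check.
 */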
7383
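/*
 * invoke_bf dispatches to the builtin through the arity-matched invoker.
 * When the calling iseq is annotated leaf (`Primitive.attr! :leaf`), a
 * canary is planted on the VM stack beforehand and re-checked afterwards,
 * so a builtin that claims to be leaf but disturbs the VM stack is caught
 * in checking builds.
 */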
7384static inline VALUE
7385invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7386{
7387 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7388 SETUP_CANARY(canary_p);
7389 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7390 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7391 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7392 return ret;
7393}
7394
7395static VALUE
7396vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7397{
7398 return invoke_bf(ec, cfp, bf, argv);
7399}
7400
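/*
 * vm_invoke_builtin_delegate forwards the current frame's locals directly
 * as the builtin's arguments: locals live just below the ep (beneath
 * VM_ENV_DATA_SIZE slots of environment data), so the pointer arithmetic
 * below yields the address of the first local, offset by start_index, and
 * that slice of the VM stack is passed as argv without copying.
 */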
7401static VALUE
7402vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7403{
7404 if (0) { // debug print
7405 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7406 for (int i=0; i<bf->argc; i++) {
7407 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7408 }
7409 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7410 (void *)(uintptr_t)bf->func_ptr);
7411 }
7412
7413 if (bf->argc == 0) {
7414 return invoke_bf(ec, cfp, bf, NULL);
7415 }
7416 else {
7417 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7418 return invoke_bf(ec, cfp, bf, argv);
7419 }
7420}
7421
7422// for __builtin_inline!()
7423
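/*
 * C code generated from a __builtin_inline!() snippet reads the caller's
 * locals through this helper; index is an ep-relative offset into the
 * current control frame.
 */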
7424VALUE
7425rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7426{
7427 const rb_control_frame_t *cfp = ec->cfp;
7428 return cfp->ep[index];
7429}