Ruby 3.5.0dev (2025-05-16 revision 06a56a7ffcb053d5bc45b9a984082d9301d6819c)
vm_insnhelper.c (06a56a7ffcb053d5bc45b9a984082d9301d6819c)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_of(exc));
    rb_obj_copy_ivar(e, exc);
    return e;
}
57
58NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
59static void
60ec_stack_overflow(rb_execution_context_t *ec, int setup)
61{
62 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
63 ec->raised_flag = RAISED_STACKOVERFLOW;
64 if (setup) {
65 VALUE at = rb_ec_backtrace_object(ec);
66 mesg = ruby_vm_special_exception_copy(mesg);
67 rb_ivar_set(mesg, idBt, at);
68 rb_ivar_set(mesg, idBt_locations, at);
69 }
70 ec->errinfo = mesg;
71 EC_JUMP_TAG(ec, TAG_RAISE);
72}
73
74NORETURN(static void vm_stackoverflow(void));
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
82NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
/* critical level
 * 0: VM stack overflow, or a machine stack overflow is imminent
 * 1: machine stack overflow, but it may be recoverable
 * 2: fatal machine stack overflow
 */
88void
89rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
90{
91 if (rb_during_gc()) {
92 rb_bug("system stack overflow during GC. Faulty native extension?");
93 }
94 if (crit > 1) {
95 ec->raised_flag = RAISED_STACKOVERFLOW;
96 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
97 EC_JUMP_TAG(ec, TAG_RAISE);
98 }
99 ec_stack_overflow(ec, crit == 0);
100}
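/* For example, unbounded recursion (`def f = f; f`) exhausts the VM stack
 * and reaches ec_stack_overflow() with crit == 0, raising a copy of the
 * preallocated SystemStackError with a backtrace attached; with crit == 2
 * no further allocation is assumed to be safe, so the shared
 * ruby_error_stackfatal exception is raised as-is. */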
101
102static inline void stack_check(rb_execution_context_t *ec);
103
104#if VM_CHECK_MODE > 0
105static int
106callable_class_p(VALUE klass)
107{
108#if VM_CHECK_MODE >= 2
109 if (!klass) return FALSE;
110 switch (RB_BUILTIN_TYPE(klass)) {
111 default:
112 break;
113 case T_ICLASS:
114 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
115 case T_MODULE:
116 return TRUE;
117 }
118 while (klass) {
119 if (klass == rb_cBasicObject) {
120 return TRUE;
121 }
122 klass = RCLASS_SUPER(klass);
123 }
124 return FALSE;
125#else
126 return klass != 0;
127#endif
128}
129
130static int
131callable_method_entry_p(const rb_callable_method_entry_t *cme)
132{
133 if (cme == NULL) {
134 return TRUE;
135 }
136 else {
137 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
138
139 if (callable_class_p(cme->defined_class)) {
140 return TRUE;
141 }
142 else {
143 return FALSE;
144 }
145 }
146}
147
148static void
149vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
150{
151 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
152 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
153
154 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
155 cref_or_me_type = imemo_type(cref_or_me);
156 }
157 if (type & VM_FRAME_FLAG_BMETHOD) {
158 req_me = TRUE;
159 }
160
161 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
162 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
163 }
164 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
165 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
166 }
167
168 if (req_me) {
169 if (cref_or_me_type != imemo_ment) {
170 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
171 }
172 }
173 else {
174 if (req_cref && cref_or_me_type != imemo_cref) {
175 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
176 }
177 else { /* cref or Qfalse */
178 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
179 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
180 /* ignore */
181 }
182 else {
183 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
184 }
185 }
186 }
187 }
188
189 if (cref_or_me_type == imemo_ment) {
190 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
191
192 if (!callable_method_entry_p(me)) {
193 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
194 }
195 }
196
197 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
198 VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq)       // argument error
201 );
202 }
203 else {
204 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
205 }
206}
207
208static void
209vm_check_frame(VALUE type,
210 VALUE specval,
211 VALUE cref_or_me,
212 const rb_iseq_t *iseq)
213{
214 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
215 VM_ASSERT(FIXNUM_P(type));
216
217#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
218 case magic: \
219 vm_check_frame_detail(type, req_block, req_me, req_cref, \
220 specval, cref_or_me, is_cframe, iseq); \
221 break
222 switch (given_magic) {
223 /* BLK ME CREF CFRAME */
224 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
225 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
226 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
227 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
228 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
229 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
230 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
231 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
232 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
233 default:
234 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
235 }
236#undef CHECK
237}
238
239static VALUE vm_stack_canary; /* Initialized later */
240static bool vm_stack_canary_was_born = false;
241
242// Return the index of the instruction right before the given PC.
243// This is needed because insn_entry advances PC before the insn body.
244static unsigned int
245previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
246{
247 unsigned int pos = 0;
248 while (pos < ISEQ_BODY(iseq)->iseq_size) {
249 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
250 unsigned int next_pos = pos + insn_len(opcode);
251 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
252 return pos;
253 }
254 pos = next_pos;
255 }
256 rb_bug("failed to find the previous insn");
257}
258
259void
260rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
261{
262 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
263 const struct rb_iseq_struct *iseq;
264
265 if (! LIKELY(vm_stack_canary_was_born)) {
266 return; /* :FIXME: isn't it rather fatal to enter this branch? */
267 }
268 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
269 /* This is at the very beginning of a thread. cfp does not exist. */
270 return;
271 }
272 else if (! (iseq = GET_ISEQ())) {
273 return;
274 }
275 else if (LIKELY(sp[0] != vm_stack_canary)) {
276 return;
277 }
278 else {
        /* we are going to call methods below; squash the canary to
         * prevent an infinite loop. */
281 sp[0] = Qundef;
282 }
283
284 const VALUE *orig = rb_iseq_original_iseq(iseq);
285 const VALUE iseqw = rb_iseqw_new(iseq);
286 const VALUE inspection = rb_inspect(iseqw);
287 const char *stri = rb_str_to_cstr(inspection);
288 const VALUE disasm = rb_iseq_disasm(iseq);
289 const char *strd = rb_str_to_cstr(disasm);
290 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
291 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
292 const char *name = insn_name(insn);
293
    /* rb_bug() is not capable of outputting such large contents.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
303 rb_bug("see above.");
304}
305#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
306
307#else
308#define vm_check_canary(ec, sp)
309#define vm_check_frame(a, b, c, d)
310#endif /* VM_CHECK_MODE > 0 */
311
312#if USE_DEBUG_COUNTER
313static void
314vm_push_frame_debug_counter_inc(
315 const struct rb_execution_context_struct *ec,
316 const struct rb_control_frame_struct *reg_cfp,
317 VALUE type)
318{
319 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
320
321 RB_DEBUG_COUNTER_INC(frame_push);
322
323 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
324 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
325 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
326 if (prev) {
327 if (curr) {
328 RB_DEBUG_COUNTER_INC(frame_R2R);
329 }
330 else {
331 RB_DEBUG_COUNTER_INC(frame_R2C);
332 }
333 }
334 else {
335 if (curr) {
336 RB_DEBUG_COUNTER_INC(frame_C2R);
337 }
338 else {
339 RB_DEBUG_COUNTER_INC(frame_C2C);
340 }
341 }
342 }
343
344 switch (type & VM_FRAME_MAGIC_MASK) {
345 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
346 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
347 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
348 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
349 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
350 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
351 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
352 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
353 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
354 }
355
356 rb_bug("unreachable");
357}
358#else
359#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
360#endif
361
362// Return a poison value to be set above the stack top to verify leafness.
363VALUE
364rb_vm_stack_canary(void)
365{
366#if VM_CHECK_MODE > 0
367 return vm_stack_canary;
368#else
369 return 0;
370#endif
371}
372
373STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
374STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
375STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
376
377static void
378vm_push_frame(rb_execution_context_t *ec,
379 const rb_iseq_t *iseq,
380 VALUE type,
381 VALUE self,
382 VALUE specval,
383 VALUE cref_or_me,
384 const VALUE *pc,
385 VALUE *sp,
386 int local_size,
387 int stack_max)
388{
389 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
390
391 vm_check_frame(type, specval, cref_or_me, iseq);
392 VM_ASSERT(local_size >= 0);
393
394 /* check stack overflow */
395 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
396 vm_check_canary(ec, sp);
397
398 /* setup vm value stack */
399
400 /* initialize local variables */
401 for (int i=0; i < local_size; i++) {
402 *sp++ = Qnil;
403 }
404
405 /* setup ep with managing data */
406 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
408 *sp++ = type; /* ep[-0] / ENV_FLAGS */
409
410 /* setup new frame */
411 *cfp = (const struct rb_control_frame_struct) {
412 .pc = pc,
413 .sp = sp,
414 .iseq = iseq,
415 .self = self,
416 .ep = sp - 1,
417 .block_code = NULL,
418#if VM_DEBUG_BP_CHECK
419 .bp_check = sp,
420#endif
421 .jit_return = NULL,
422 };
423
424 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
425 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
426 future/untested compilers/platforms. */
427
428 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
429 atomic_signal_fence(memory_order_seq_cst);
430 #endif
431
432 ec->cfp = cfp;
433
434 if (VMDEBUG == 2) {
435 SDR();
436 }
437 vm_push_frame_debug_counter_inc(ec, cfp, type);
438}
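/* Illustration of the env part just built (higher addresses at the top):
 *
 *     ep[ 0] : type        (VM_ENV_DATA_INDEX_FLAGS, == sp - 1)
 *     ep[-1] : specval     (VM_ENV_DATA_INDEX_SPECVAL)
 *     ep[-2] : cref_or_me  (VM_ENV_DATA_INDEX_ME_CREF)
 *     below  : local_size slots initialized to Qnil
 *
 * cfp->ep points at the FLAGS slot, matching the STATIC_ASSERTs above. */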
439
440void
441rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
442{
443 rb_control_frame_t *cfp = ec->cfp;
444
445 if (VMDEBUG == 2) SDR();
446
447 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
448}
449
450/* return TRUE if the frame is finished */
451static inline int
452vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
453{
454 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
455
456 if (VMDEBUG == 2) SDR();
457
458 RUBY_VM_CHECK_INTS(ec);
459 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
460
461 return flags & VM_FRAME_FLAG_FINISH;
462}
463
464void
465rb_vm_pop_frame(rb_execution_context_t *ec)
466{
467 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
468}
469
// Push a pseudo frame that carries only the file name `fname`.
471VALUE
472rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
473{
474 rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
475 rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);
476
477 vm_push_frame(ec,
478 dmy_iseq, //const rb_iseq_t *iseq,
479 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
480 ec->cfp->self, // VALUE self,
481 VM_BLOCK_HANDLER_NONE, // VALUE specval,
482 Qfalse, // VALUE cref_or_me,
483 NULL, // const VALUE *pc,
484 ec->cfp->sp, // VALUE *sp,
485 0, // int local_size,
486 0); // int stack_max
487
488 return (VALUE)dmy_iseq;
489}
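/* This dummy frame is used, for example, while a file is being loaded, so
 * that exceptions and backtraces produced during the load can report
 * `fname` as their location even though no Ruby-level frame exists yet. */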
490
491/* method dispatch */
492static inline VALUE
493rb_arity_error_new(int argc, int min, int max)
494{
495 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
496 if (min == max) {
497 /* max is not needed */
498 }
499 else if (max == UNLIMITED_ARGUMENTS) {
500 rb_str_cat_cstr(err_mess, "+");
501 }
502 else {
503 rb_str_catf(err_mess, "..%d", max);
504 }
505 rb_str_cat_cstr(err_mess, ")");
506 return rb_exc_new3(rb_eArgError, err_mess);
507}
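/* Messages produced by rb_arity_error_new(), for illustration:
 *   (1, 2, 2)                   => "wrong number of arguments (given 1, expected 2)"
 *   (1, 2, UNLIMITED_ARGUMENTS) => "wrong number of arguments (given 1, expected 2+)"
 *   (4, 2, 3)                   => "wrong number of arguments (given 4, expected 2..3)"
 */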
508
509void
510rb_error_arity(int argc, int min, int max)
511{
512 rb_exc_raise(rb_arity_error_new(argc, min, max));
513}
514
515/* lvar */
516
517NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
518
519static void
520vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
521{
    /* forcibly remember the env value */
523 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
524 VM_FORCE_WRITE(&ep[index], v);
525 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
526 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
527}
528
529// YJIT assumes this function never runs GC
530static inline void
531vm_env_write(const VALUE *ep, int index, VALUE v)
532{
533 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
534 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
535 VM_STACK_ENV_WRITE(ep, index, v);
536 }
537 else {
538 vm_env_write_slowpath(ep, index, v);
539 }
540}
541
542void
543rb_vm_env_write(const VALUE *ep, int index, VALUE v)
544{
545 vm_env_write(ep, index, v);
546}
547
548VALUE
549rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
550{
551 if (block_handler == VM_BLOCK_HANDLER_NONE) {
552 return Qnil;
553 }
554 else {
555 switch (vm_block_handler_type(block_handler)) {
556 case block_handler_type_iseq:
557 case block_handler_type_ifunc:
558 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
559 case block_handler_type_symbol:
560 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
561 case block_handler_type_proc:
562 return VM_BH_TO_PROC(block_handler);
563 default:
564 VM_UNREACHABLE(rb_vm_bh_to_procval);
565 }
566 }
567}
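/* For example, with `def m(&b) = b`: `m { }` carries an iseq block handler
 * and allocates a new Proc here, `m(&:to_s)` takes the symbol branch
 * through rb_sym_to_proc, and `m(&pr)` returns `pr` unchanged. */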
568
569/* svar */
570
571#if VM_CHECK_MODE > 0
572static int
573vm_svar_valid_p(VALUE svar)
574{
575 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
576 switch (imemo_type(svar)) {
577 case imemo_svar:
578 case imemo_cref:
579 case imemo_ment:
580 return TRUE;
581 default:
582 break;
583 }
584 }
585 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
586 return FALSE;
587}
588#endif
589
590static inline struct vm_svar *
591lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
592{
593 VALUE svar;
594
595 if (lep && (ec == NULL || ec->root_lep != lep)) {
596 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
597 }
598 else {
599 svar = ec->root_svar;
600 }
601
602 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
603
604 return (struct vm_svar *)svar;
605}
606
607static inline void
608lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
609{
610 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
611
612 if (lep && (ec == NULL || ec->root_lep != lep)) {
613 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
614 }
615 else {
616 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
617 }
618}
619
620static VALUE
621lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
622{
623 const struct vm_svar *svar = lep_svar(ec, lep);
624
625 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
626
627 switch (key) {
628 case VM_SVAR_LASTLINE:
629 return svar->lastline;
630 case VM_SVAR_BACKREF:
631 return svar->backref;
632 default: {
633 const VALUE ary = svar->others;
634
635 if (NIL_P(ary)) {
636 return Qnil;
637 }
638 else {
639 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
640 }
641 }
642 }
643}
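/* svar holds the per-frame special variables, e.g. $_ is read via
 * VM_SVAR_LASTLINE and $~ via VM_SVAR_BACKREF; keys at
 * VM_SVAR_EXTRA_START and above index the `others` array (used, e.g.,
 * for flip-flop state). */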
644
645static struct vm_svar *
646svar_new(VALUE obj)
647{
648 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
649 *((VALUE *)&svar->lastline) = Qnil;
650 *((VALUE *)&svar->backref) = Qnil;
651 *((VALUE *)&svar->others) = Qnil;
652
653 return svar;
654}
655
656static void
657lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
658{
659 struct vm_svar *svar = lep_svar(ec, lep);
660
661 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
662 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
663 }
664
665 switch (key) {
666 case VM_SVAR_LASTLINE:
667 RB_OBJ_WRITE(svar, &svar->lastline, val);
668 return;
669 case VM_SVAR_BACKREF:
670 RB_OBJ_WRITE(svar, &svar->backref, val);
671 return;
672 default: {
673 VALUE ary = svar->others;
674
675 if (NIL_P(ary)) {
676 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
677 }
678 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
679 }
680 }
681}
682
683static inline VALUE
684vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
685{
686 VALUE val;
687
688 if (type == 0) {
689 val = lep_svar_get(ec, lep, key);
690 }
691 else {
692 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
693
694 if (type & 0x01) {
695 switch (type >> 1) {
696 case '&':
697 val = rb_reg_last_match(backref);
698 break;
699 case '`':
700 val = rb_reg_match_pre(backref);
701 break;
702 case '\'':
703 val = rb_reg_match_post(backref);
704 break;
705 case '+':
706 val = rb_reg_match_last(backref);
707 break;
708 default:
709 rb_bug("unexpected back-ref");
710 }
711 }
712 else {
713 val = rb_reg_nth_match((int)(type >> 1), backref);
714 }
715 }
716 return val;
717}
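/* Example: after `"hello" =~ /e(l+)/`, the odd `type` branch yields
 * $& == "ell", $` == "h", $' == "o" and $+ == "ll", while an even `type`
 * encodes a group number, e.g. $1 == "ll" via rb_reg_nth_match(1, backref). */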
718
719static inline VALUE
720vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
721{
722 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
723 int nth = 0;
724
725 if (type & 0x01) {
726 switch (type >> 1) {
727 case '&':
728 case '`':
729 case '\'':
730 break;
731 case '+':
732 return rb_reg_last_defined(backref);
733 default:
734 rb_bug("unexpected back-ref");
735 }
736 }
737 else {
738 nth = (int)(type >> 1);
739 }
740 return rb_reg_nth_defined(nth, backref);
741}
742
PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
746{
747 if (obj == Qfalse) return NULL;
748
749#if VM_CHECK_MODE > 0
750 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
751#endif
752
753 switch (imemo_type(obj)) {
754 case imemo_ment:
755 return (rb_callable_method_entry_t *)obj;
756 case imemo_cref:
757 return NULL;
758 case imemo_svar:
759 if (can_be_svar) {
760 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
761 }
762 default:
763#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there");
765#endif
766 return NULL;
767 }
768}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;
    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
783
784static const rb_iseq_t *
785method_entry_iseqptr(const rb_callable_method_entry_t *me)
786{
787 switch (me->def->type) {
788 case VM_METHOD_TYPE_ISEQ:
789 return me->def->body.iseq.iseqptr;
790 default:
791 return NULL;
792 }
793}
794
795static rb_cref_t *
796method_entry_cref(const rb_callable_method_entry_t *me)
797{
798 switch (me->def->type) {
799 case VM_METHOD_TYPE_ISEQ:
800 return me->def->body.iseq.cref;
801 default:
802 return NULL;
803 }
804}
805
806#if VM_CHECK_MODE == 0
807PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
808#endif
809static rb_cref_t *
810check_cref(VALUE obj, int can_be_svar)
811{
812 if (obj == Qfalse) return NULL;
813
814#if VM_CHECK_MODE > 0
815 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
816#endif
817
818 switch (imemo_type(obj)) {
819 case imemo_ment:
820 return method_entry_cref((rb_callable_method_entry_t *)obj);
821 case imemo_cref:
822 return (rb_cref_t *)obj;
823 case imemo_svar:
824 if (can_be_svar) {
825 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
826 }
827 default:
828#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there");
830#endif
831 return NULL;
832 }
833}
834
835static inline rb_cref_t *
836vm_env_cref(const VALUE *ep)
837{
838 rb_cref_t *cref;
839
840 while (!VM_ENV_LOCAL_P(ep)) {
841 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
842 ep = VM_ENV_PREV_EP(ep);
843 }
844
845 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
846}
847
848static int
849is_cref(const VALUE v, int can_be_svar)
850{
851 if (RB_TYPE_P(v, T_IMEMO)) {
852 switch (imemo_type(v)) {
853 case imemo_cref:
854 return TRUE;
855 case imemo_svar:
856 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
857 default:
858 break;
859 }
860 }
861 return FALSE;
862}
863
864static int
865vm_env_cref_by_cref(const VALUE *ep)
866{
867 while (!VM_ENV_LOCAL_P(ep)) {
868 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
869 ep = VM_ENV_PREV_EP(ep);
870 }
871 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
872}
873
874static rb_cref_t *
875cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
876{
877 const VALUE v = *vptr;
878 rb_cref_t *cref, *new_cref;
879
880 if (RB_TYPE_P(v, T_IMEMO)) {
881 switch (imemo_type(v)) {
882 case imemo_cref:
883 cref = (rb_cref_t *)v;
884 new_cref = vm_cref_dup(cref);
885 if (parent) {
886 RB_OBJ_WRITE(parent, vptr, new_cref);
887 }
888 else {
889 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
890 }
891 return (rb_cref_t *)new_cref;
892 case imemo_svar:
893 if (can_be_svar) {
894 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
895 }
896 /* fall through */
897 case imemo_ment:
898 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
899 default:
900 break;
901 }
902 }
903 return NULL;
904}
905
906static rb_cref_t *
907vm_cref_replace_with_duplicated_cref(const VALUE *ep)
908{
909 if (vm_env_cref_by_cref(ep)) {
910 rb_cref_t *cref;
911 VALUE envval;
912
913 while (!VM_ENV_LOCAL_P(ep)) {
914 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
915 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
916 return cref;
917 }
918 ep = VM_ENV_PREV_EP(ep);
919 }
920 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
921 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
922 }
923 else {
        rb_bug("vm_cref_replace_with_duplicated_cref: unreachable");
925 }
926}
927
928static rb_cref_t *
929vm_get_cref(const VALUE *ep)
930{
931 rb_cref_t *cref = vm_env_cref(ep);
932
933 if (cref != NULL) {
934 return cref;
935 }
936 else {
937 rb_bug("vm_get_cref: unreachable");
938 }
939}
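/* The cref is the lexical nesting chain, e.g. inside
 *   class A; module B; def m; end; end; end
 * the body of `m` sees the chain B -> A -> (top), which is what constant
 * lookup and class-variable lookup below walk through. */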
940
941rb_cref_t *
942rb_vm_get_cref(const VALUE *ep)
943{
944 return vm_get_cref(ep);
945}
946
947static rb_cref_t *
948vm_ec_cref(const rb_execution_context_t *ec)
949{
950 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
951
952 if (cfp == NULL) {
953 return NULL;
954 }
955 return vm_get_cref(cfp->ep);
956}
957
958static const rb_cref_t *
959vm_get_const_key_cref(const VALUE *ep)
960{
961 const rb_cref_t *cref = vm_get_cref(ep);
962 const rb_cref_t *key_cref = cref;
963
964 while (cref) {
965 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
966 RCLASS_CLONED_P(CREF_CLASS(cref)) ) {
967 return key_cref;
968 }
969 cref = CREF_NEXT(cref);
970 }
971
972 /* does not include singleton class */
973 return NULL;
974}
975
976void
977rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
978{
979 rb_cref_t *new_cref;
980
981 while (cref) {
982 if (CREF_CLASS(cref) == old_klass) {
983 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
984 *new_cref_ptr = new_cref;
985 return;
986 }
987 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
988 cref = CREF_NEXT(cref);
989 *new_cref_ptr = new_cref;
990 new_cref_ptr = &new_cref->next;
991 }
992 *new_cref_ptr = NULL;
993}
994
995static rb_cref_t *
996vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
997{
998 rb_cref_t *prev_cref = NULL;
999
1000 if (ep) {
1001 prev_cref = vm_env_cref(ep);
1002 }
1003 else {
1004 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1005
1006 if (cfp) {
1007 prev_cref = vm_env_cref(cfp->ep);
1008 }
1009 }
1010
1011 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1012}
1013
1014static inline VALUE
1015vm_get_cbase(const VALUE *ep)
1016{
1017 const rb_cref_t *cref = vm_get_cref(ep);
1018
1019 return CREF_CLASS_FOR_DEFINITION(cref);
1020}
1021
1022static inline VALUE
1023vm_get_const_base(const VALUE *ep)
1024{
1025 const rb_cref_t *cref = vm_get_cref(ep);
1026
1027 while (cref) {
1028 if (!CREF_PUSHED_BY_EVAL(cref)) {
1029 return CREF_CLASS_FOR_DEFINITION(cref);
1030 }
1031 cref = CREF_NEXT(cref);
1032 }
1033
1034 return Qundef;
1035}
1036
1037static inline void
1038vm_check_if_namespace(VALUE klass)
1039{
1040 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1041 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1042 }
1043}
1044
1045static inline void
1046vm_ensure_not_refinement_module(VALUE self)
1047{
1048 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1049 rb_warn("not defined at the refinement, but at the outer class/module");
1050 }
1051}
1052
1053static inline VALUE
1054vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1055{
1056 return klass;
1057}
1058
1059static inline VALUE
1060vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1061{
1062 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1063 VALUE val;
1064
1065 if (NIL_P(orig_klass) && allow_nil) {
1066 /* in current lexical scope */
1067 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1068 const rb_cref_t *cref;
1069 VALUE klass = Qnil;
1070
1071 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1072 root_cref = CREF_NEXT(root_cref);
1073 }
1074 cref = root_cref;
1075 while (cref && CREF_NEXT(cref)) {
1076 if (CREF_PUSHED_BY_EVAL(cref)) {
1077 klass = Qnil;
1078 }
1079 else {
1080 klass = CREF_CLASS(cref);
1081 }
1082 cref = CREF_NEXT(cref);
1083
1084 if (!NIL_P(klass)) {
1085 VALUE av, am = 0;
1086 rb_const_entry_t *ce;
1087 search_continue:
1088 if ((ce = rb_const_lookup(klass, id))) {
1089 rb_const_warn_if_deprecated(ce, klass, id);
1090 val = ce->value;
1091 if (UNDEF_P(val)) {
1092 if (am == klass) break;
1093 am = klass;
1094 if (is_defined) return 1;
1095 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1096 rb_autoload_load(klass, id);
1097 goto search_continue;
1098 }
1099 else {
1100 if (is_defined) {
1101 return 1;
1102 }
1103 else {
1104 if (UNLIKELY(!rb_ractor_main_p())) {
1105 if (!rb_ractor_shareable_p(val)) {
1106 rb_raise(rb_eRactorIsolationError,
                                 "cannot access non-shareable objects in constant %"PRIsVALUE"::%s from non-main Ractor.", rb_class_path(klass), rb_id2name(id));
1108 }
1109 }
1110 return val;
1111 }
1112 }
1113 }
1114 }
1115 }
1116
1117 /* search self */
1118 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1119 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1120 }
1121 else {
1122 klass = CLASS_OF(ec->cfp->self);
1123 }
1124
1125 if (is_defined) {
1126 return rb_const_defined(klass, id);
1127 }
1128 else {
1129 return rb_const_get(klass, id);
1130 }
1131 }
1132 else {
1133 vm_check_if_namespace(orig_klass);
1134 if (is_defined) {
1135 return rb_public_const_defined_from(orig_klass, id);
1136 }
1137 else {
1138 return rb_public_const_get_from(orig_klass, id);
1139 }
1140 }
1141}
1142
1143VALUE
1144rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1145{
1146 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1147}
1148
1149static inline VALUE
1150vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1151{
1152 VALUE val = Qnil;
1153 int idx = 0;
1154 int allow_nil = TRUE;
1155 if (segments[0] == idNULL) {
1156 val = rb_cObject;
1157 idx++;
1158 allow_nil = FALSE;
1159 }
1160 while (segments[idx]) {
1161 ID id = segments[idx++];
1162 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1163 allow_nil = FALSE;
1164 }
1165 return val;
1166}
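/* `segments` is a 0-terminated array of IDs; a leading idNULL marks an
 * absolute path. E.g. `A::B` becomes {<id A>, <id B>, 0} and resolves its
 * first segment lexically, while `::A::B` becomes {idNULL, <id A>, <id B>, 0}
 * and starts from rb_cObject with allow_nil disabled. */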
1167
1168
1169static inline VALUE
1170vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1171{
1172 VALUE klass;
1173
1174 if (!cref) {
1175 rb_bug("vm_get_cvar_base: no cref");
1176 }
1177
1178 while (CREF_NEXT(cref) &&
1179 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1180 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1181 cref = CREF_NEXT(cref);
1182 }
1183 if (top_level_raise && !CREF_NEXT(cref)) {
1184 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1185 }
1186
1187 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1188
1189 if (NIL_P(klass)) {
1190 rb_raise(rb_eTypeError, "no class variables available");
1191 }
1192 return klass;
1193}
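/* E.g. `@@x` in `class C; def m; @@x; end; end` resolves its base to C
 * here, skipping singleton/eval crefs, whereas reading `@@x` at the top
 * level hits the RuntimeError above. */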
1194
1195ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1196static inline void
1197fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1198{
1199 if (is_attr) {
1200 vm_cc_attr_index_set(cc, index, shape_id);
1201 }
1202 else {
1203 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1204 }
1205}
1206
1207#define ractor_incidental_shareable_p(cond, val) \
1208 (!(cond) || rb_ractor_shareable_p(val))
1209#define ractor_object_incidental_shareable_p(obj, val) \
1210 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1211
1212#define ATTR_INDEX_NOT_SET (attr_index_t)-1
1213
1214ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1215static inline VALUE
1216vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1217{
1218#if OPT_IC_FOR_IVAR
1219 VALUE val = Qundef;
1220 shape_id_t shape_id;
1221 VALUE * ivar_list;
1222
1223 if (SPECIAL_CONST_P(obj)) {
1224 return default_value;
1225 }
1226
1227#if SHAPE_IN_BASIC_FLAGS
1228 shape_id = RBASIC_SHAPE_ID(obj);
1229#endif
1230
1231 switch (BUILTIN_TYPE(obj)) {
1232 case T_OBJECT:
1233 ivar_list = ROBJECT_FIELDS(obj);
1234 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1235
1236#if !SHAPE_IN_BASIC_FLAGS
1237 shape_id = ROBJECT_SHAPE_ID(obj);
1238#endif
1239 break;
1240 case T_CLASS:
1241 case T_MODULE:
1242 {
1243 if (UNLIKELY(!rb_ractor_main_p())) {
1244 // For two reasons we can only use the fast path on the main
1245 // ractor.
1246 // First, only the main ractor is allowed to set ivars on classes
1247 // and modules. So we can skip locking.
1248 // Second, other ractors need to check the shareability of the
1249 // values returned from the class ivars.
1250
1251 if (default_value == Qundef) { // defined?
1252 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1253 }
1254 else {
1255 goto general_path;
1256 }
1257 }
1258
1259 ivar_list = RCLASS_PRIME_FIELDS(obj);
1260
1261#if !SHAPE_IN_BASIC_FLAGS
1262 shape_id = RCLASS_SHAPE_ID(obj);
1263#endif
1264
1265 break;
1266 }
1267 default:
1268 if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1269 struct gen_fields_tbl *fields_tbl;
1270 rb_gen_fields_tbl_get(obj, id, &fields_tbl);
1271#if !SHAPE_IN_BASIC_FLAGS
1272 shape_id = fields_tbl->shape_id;
1273#endif
1274 ivar_list = fields_tbl->as.shape.fields;
1275 }
1276 else {
1277 return default_value;
1278 }
1279 }
1280
1281 shape_id_t cached_id;
1282 attr_index_t index;
1283
1284 if (is_attr) {
1285 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1286 }
1287 else {
1288 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1289 }
1290
1291 if (LIKELY(cached_id == shape_id)) {
1292 RUBY_ASSERT(!rb_shape_id_too_complex_p(cached_id));
1293
1294 if (index == ATTR_INDEX_NOT_SET) {
1295 return default_value;
1296 }
1297
1298 val = ivar_list[index];
1299#if USE_DEBUG_COUNTER
1300 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1301
1302 if (RB_TYPE_P(obj, T_OBJECT)) {
1303 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1304 }
1305#endif
1306 RUBY_ASSERT(!UNDEF_P(val));
1307 }
1308 else { // cache miss case
1309#if USE_DEBUG_COUNTER
1310 if (is_attr) {
1311 if (cached_id != INVALID_SHAPE_ID) {
1312 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1313 }
1314 else {
1315 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1316 }
1317 }
1318 else {
1319 if (cached_id != INVALID_SHAPE_ID) {
1320 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1321 }
1322 else {
1323 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1324 }
1325 }
1326 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1327
1328 if (RB_TYPE_P(obj, T_OBJECT)) {
1329 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1330 }
1331#endif
1332
1333 if (rb_shape_id_too_complex_p(shape_id)) {
1334 st_table *table = NULL;
1335 switch (BUILTIN_TYPE(obj)) {
1336 case T_CLASS:
1337 case T_MODULE:
1338 table = (st_table *)RCLASS_FIELDS_HASH(obj);
1339 break;
1340
1341 case T_OBJECT:
1342 table = ROBJECT_FIELDS_HASH(obj);
1343 break;
1344
1345 default: {
1346 struct gen_fields_tbl *fields_tbl;
1347 if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
1348 table = fields_tbl->as.complex.table;
1349 }
1350 break;
1351 }
1352 }
1353
1354 if (!table || !st_lookup(table, id, &val)) {
1355 val = default_value;
1356 }
1357 }
1358 else {
1359 shape_id_t previous_cached_id = cached_id;
1360 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // Fill in the inline cache with the freshly resolved
                // shape/index pair (only when the cached hint was stale).
1363 if (cached_id != previous_cached_id) {
1364 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1365 }
1366
1367 if (index == ATTR_INDEX_NOT_SET) {
1368 val = default_value;
1369 }
1370 else {
1371 // We fetched the ivar list above
1372 val = ivar_list[index];
1373 RUBY_ASSERT(!UNDEF_P(val));
1374 }
1375 }
1376 else {
1377 if (is_attr) {
1378 vm_cc_attr_index_initialize(cc, shape_id);
1379 }
1380 else {
1381 vm_ic_attr_index_initialize(ic, shape_id);
1382 }
1383
1384 val = default_value;
1385 }
1386 }
1387
1388 }
1389
1390 if (!UNDEF_P(default_value)) {
1391 RUBY_ASSERT(!UNDEF_P(val));
1392 }
1393
1394 return val;
1395
1396general_path:
1397#endif /* OPT_IC_FOR_IVAR */
1398 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1399
1400 if (is_attr) {
1401 return rb_attr_get(obj, id);
1402 }
1403 else {
1404 return rb_ivar_get(obj, id);
1405 }
1406}
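/* The effect of the inline cache above: once a call site has seen an
 * object's shape, re-reading the same ivar is a single shape_id comparison
 * plus an indexed load, e.g. the second call of `def x = @x` on objects
 * sharing a shape never reaches rb_ivar_get. */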
1407
1408static void
1409populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1410{
1411 RUBY_ASSERT(!rb_shape_id_too_complex_p(next_shape_id));
1412
1413 // Cache population code
1414 if (is_attr) {
1415 vm_cc_attr_index_set(cc, index, next_shape_id);
1416 }
1417 else {
1418 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1419 }
1420}
1421
1422ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1423NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1424NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1425
1426static VALUE
1427vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1428{
1429#if OPT_IC_FOR_IVAR
1430 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1431
1432 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1433 rb_check_frozen(obj);
1434
1435 attr_index_t index = rb_obj_ivar_set(obj, id, val);
1436
1437 shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
1438
1439 if (!rb_shape_id_too_complex_p(next_shape_id)) {
1440 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1441 }
1442
1443 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1444 return val;
1445 }
1446#endif
1447 return rb_ivar_set(obj, id, val);
1448}
1449
1450static VALUE
1451vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1452{
1453 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1454}
1455
1456static VALUE
1457vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1458{
1459 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1460}
1461
1462NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1463static VALUE
1464vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1465{
1466#if SHAPE_IN_BASIC_FLAGS
1467 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1468#else
1469 shape_id_t shape_id = rb_generic_shape_id(obj);
1470#endif
1471
1472 struct gen_fields_tbl *fields_tbl = 0;
1473
1474 // Cache hit case
1475 if (shape_id == dest_shape_id) {
1476 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1477 }
1478 else if (dest_shape_id != INVALID_SHAPE_ID) {
1479 rb_shape_t *shape = RSHAPE(shape_id);
1480 rb_shape_t *dest_shape = RSHAPE(dest_shape_id);
1481
1482 if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1483 RUBY_ASSERT(index < dest_shape->capacity);
1484 }
1485 else {
1486 return Qundef;
1487 }
1488 }
1489 else {
1490 return Qundef;
1491 }
1492
1493 rb_gen_fields_tbl_get(obj, 0, &fields_tbl);
1494
1495 if (shape_id != dest_shape_id) {
1496#if SHAPE_IN_BASIC_FLAGS
1497 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1498#else
1499 fields_tbl->shape_id = dest_shape_id;
1500#endif
1501 }
1502
1503 RB_OBJ_WRITE(obj, &fields_tbl->as.shape.fields[index], val);
1504
1505 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1506
1507 return val;
1508}
1509
1510static inline VALUE
1511vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1512{
1513#if OPT_IC_FOR_IVAR
1514 switch (BUILTIN_TYPE(obj)) {
1515 case T_OBJECT:
1516 {
1517 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1518
1519 shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
1520 RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_id_too_complex_p(dest_shape_id));
1521
1522 if (LIKELY(shape_id == dest_shape_id)) {
1523 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1524 VM_ASSERT(!rb_ractor_shareable_p(obj));
1525 }
1526 else if (dest_shape_id != INVALID_SHAPE_ID) {
1527 rb_shape_t *shape = RSHAPE(shape_id);
1528 rb_shape_t *dest_shape = RSHAPE(dest_shape_id);
1529 shape_id_t source_shape_id = dest_shape->parent_id;
1530
1531 if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1532 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1533
1534 ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
1535
1536 RUBY_ASSERT(rb_shape_get_next_iv_shape(source_shape_id, id) == dest_shape_id);
1537 RUBY_ASSERT(index < dest_shape->capacity);
1538 }
1539 else {
1540 break;
1541 }
1542 }
1543 else {
1544 break;
1545 }
1546
1547 VALUE *ptr = ROBJECT_FIELDS(obj);
1548
1549 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1550 RB_OBJ_WRITE(obj, &ptr[index], val);
1551
1552 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1553 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1554 return val;
1555 }
1556 break;
1557 case T_CLASS:
1558 case T_MODULE:
1559 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1560 default:
1561 break;
1562 }
1563
1564 return Qundef;
1565#endif /* OPT_IC_FOR_IVAR */
1566}
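/* The two cache-hit cases for the write above, in shape terms: writing an
 * already-existing ivar (shape_id == dest_shape_id), and adding a new ivar
 * whose transition was recorded in the cache (dest_shape's parent ==
 * shape_id and its edge_name == id), e.g. the second time `@x = x` runs in
 * some `initialize`. */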
1567
1568static VALUE
1569update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1570{
1571 VALUE defined_class = 0;
1572 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1573
1574 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1575 defined_class = RBASIC(defined_class)->klass;
1576 }
1577
1578 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1579 if (!rb_cvc_tbl) {
1580 rb_bug("the cvc table should be set");
1581 }
1582
1583 VALUE ent_data;
1584 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1585 rb_bug("should have cvar cache entry");
1586 }
1587
1588 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1589
1590 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1591 ent->cref = cref;
1592 ic->entry = ent;
1593
1594 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1595 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1596 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1597 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1598
1599 return cvar_value;
1600}
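/* The entry cached here is validated by (global_cvar_state, cref): e.g.
 * defining a new class variable bumps GET_GLOBAL_CVAR_STATE(), which
 * invalidates every cached class-variable lookup at once rather than
 * per-class. */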
1601
1602static inline VALUE
1603vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1604{
1605 const rb_cref_t *cref;
1606 cref = vm_get_cref(GET_EP());
1607
1608 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1609 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1610
1611 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1612 RUBY_ASSERT(!UNDEF_P(v));
1613
1614 return v;
1615 }
1616
1617 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1618
1619 return update_classvariable_cache(iseq, klass, id, cref, ic);
1620}
1621
1622VALUE
1623rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1624{
1625 return vm_getclassvariable(iseq, cfp, id, ic);
1626}
1627
1628static inline void
1629vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1630{
1631 const rb_cref_t *cref;
1632 cref = vm_get_cref(GET_EP());
1633
1634 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1635 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1636
1637 rb_class_ivar_set(ic->entry->class_value, id, val);
1638 return;
1639 }
1640
1641 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1642
1643 rb_cvar_set(klass, id, val);
1644
1645 update_classvariable_cache(iseq, klass, id, cref, ic);
1646}
1647
1648void
1649rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1650{
1651 vm_setclassvariable(iseq, cfp, id, val, ic);
1652}
1653
1654static inline VALUE
1655vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1656{
1657 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1658}
1659
1660static inline void
1661vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1662{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }
1667
1668 shape_id_t dest_shape_id;
1669 attr_index_t index;
1670 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1671
1672 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1673 switch (BUILTIN_TYPE(obj)) {
1674 case T_OBJECT:
1675 case T_CLASS:
1676 case T_MODULE:
1677 break;
1678 default:
1679 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1680 return;
1681 }
1682 }
1683 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1684 }
1685}
1686
1687void
1688rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1689{
1690 vm_setinstancevariable(iseq, obj, id, val, ic);
1691}
1692
1693static VALUE
1694vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1695{
1696 /* continue throw */
1697
1698 if (FIXNUM_P(err)) {
1699 ec->tag->state = RUBY_TAG_FATAL;
1700 }
1701 else if (SYMBOL_P(err)) {
1702 ec->tag->state = TAG_THROW;
1703 }
1704 else if (THROW_DATA_P(err)) {
1705 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1706 }
1707 else {
1708 ec->tag->state = TAG_RAISE;
1709 }
1710 return err;
1711}
1712
1713static VALUE
1714vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1715 const int flag, const VALUE throwobj)
1716{
1717 const rb_control_frame_t *escape_cfp = NULL;
1718 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1719
1720 if (flag != 0) {
1721 /* do nothing */
1722 }
1723 else if (state == TAG_BREAK) {
1724 int is_orphan = 1;
1725 const VALUE *ep = GET_EP();
1726 const rb_iseq_t *base_iseq = GET_ISEQ();
1727 escape_cfp = reg_cfp;
1728
1729 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1730 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1731 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1732 ep = escape_cfp->ep;
1733 base_iseq = escape_cfp->iseq;
1734 }
1735 else {
1736 ep = VM_ENV_PREV_EP(ep);
1737 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1738 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1739 VM_ASSERT(escape_cfp->iseq == base_iseq);
1740 }
1741 }
1742
1743 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1744 /* lambda{... break ...} */
1745 is_orphan = 0;
1746 state = TAG_RETURN;
1747 }
1748 else {
1749 ep = VM_ENV_PREV_EP(ep);
1750
1751 while (escape_cfp < eocfp) {
1752 if (escape_cfp->ep == ep) {
1753 const rb_iseq_t *const iseq = escape_cfp->iseq;
1754 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1755 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1756 unsigned int i;
1757
1758 if (!ct) break;
1759 for (i=0; i < ct->size; i++) {
1760 const struct iseq_catch_table_entry *const entry =
1761 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1762
1763 if (entry->type == CATCH_TYPE_BREAK &&
1764 entry->iseq == base_iseq &&
1765 entry->start < epc && entry->end >= epc) {
1766 if (entry->cont == epc) { /* found! */
1767 is_orphan = 0;
1768 }
1769 break;
1770 }
1771 }
1772 break;
1773 }
1774
1775 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1776 }
1777 }
1778
1779 if (is_orphan) {
1780 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1781 }
1782 }
1783 else if (state == TAG_RETRY) {
1784 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1785
1786 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1787 }
1788 else if (state == TAG_RETURN) {
1789 const VALUE *current_ep = GET_EP();
1790 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1791 int in_class_frame = 0;
1792 int toplevel = 1;
1793 escape_cfp = reg_cfp;
1794
1795 // find target_lep, target_ep
1796 while (!VM_ENV_LOCAL_P(ep)) {
1797 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1798 target_ep = ep;
1799 }
1800 ep = VM_ENV_PREV_EP(ep);
1801 }
1802 target_lep = ep;
1803
1804 while (escape_cfp < eocfp) {
1805 const VALUE *lep = VM_CF_LEP(escape_cfp);
1806
1807 if (!target_lep) {
1808 target_lep = lep;
1809 }
1810
1811 if (lep == target_lep &&
1812 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1813 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1814 in_class_frame = 1;
1815 target_lep = 0;
1816 }
1817
1818 if (lep == target_lep) {
1819 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1820 toplevel = 0;
1821 if (in_class_frame) {
1822 /* lambda {class A; ... return ...; end} */
1823 goto valid_return;
1824 }
1825 else {
1826 const VALUE *tep = current_ep;
1827
1828 while (target_lep != tep) {
1829 if (escape_cfp->ep == tep) {
1830 /* in lambda */
1831 if (tep == target_ep) {
1832 goto valid_return;
1833 }
1834 else {
1835 goto unexpected_return;
1836 }
1837 }
1838 tep = VM_ENV_PREV_EP(tep);
1839 }
1840 }
1841 }
1842 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1843 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1844 case ISEQ_TYPE_TOP:
1845 case ISEQ_TYPE_MAIN:
1846 if (toplevel) {
1847 if (in_class_frame) goto unexpected_return;
1848 if (target_ep == NULL) {
1849 goto valid_return;
1850 }
1851 else {
1852 goto unexpected_return;
1853 }
1854 }
1855 break;
1856 case ISEQ_TYPE_EVAL: {
1857 const rb_iseq_t *is = escape_cfp->iseq;
1858 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1859 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1860 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1861 t = ISEQ_BODY(is)->type;
1862 }
1863 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1864 break;
1865 }
1866 case ISEQ_TYPE_CLASS:
1867 toplevel = 0;
1868 break;
1869 default:
1870 break;
1871 }
1872 }
1873 }
1874
1875 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1876 if (target_ep == NULL) {
1877 goto valid_return;
1878 }
1879 else {
1880 goto unexpected_return;
1881 }
1882 }
1883
1884 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1885 }
1886 unexpected_return:;
1887 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1888
1889 valid_return:;
1890 /* do nothing */
1891 }
1892 else {
        rb_bug("insns(throw): unsupported throw type");
1894 }
1895
1896 ec->tag->state = state;
1897 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1898}
1899
1900static VALUE
1901vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1902 rb_num_t throw_state, VALUE throwobj)
1903{
1904 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1905 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1906
1907 if (state != 0) {
1908 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1909 }
1910 else {
1911 return vm_throw_continue(ec, throwobj);
1912 }
1913}
1914
1915VALUE
1916rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1917{
1918 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1919}
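/* E.g. `[1].each { break 42 }` reaches vm_throw_start with TAG_BREAK and
 * selects the frame that pushed the block as escape_cfp, while a `break`
 * inside a lambda is rewritten to TAG_RETURN above, and a `break` whose
 * defining frame is already gone raises the "break from proc-closure"
 * LocalJumpError. */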
1920
1921static inline void
1922vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1923{
1924 int is_splat = flag & 0x01;
1925 const VALUE *ptr;
1926 rb_num_t len;
1927 const VALUE obj = ary;
1928
1929 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1930 ary = obj;
1931 ptr = &ary;
1932 len = 1;
1933 }
1934 else {
1935 ptr = RARRAY_CONST_PTR(ary);
1936 len = (rb_num_t)RARRAY_LEN(ary);
1937 }
1938
1939 if (num + is_splat == 0) {
        /* nothing to do: no values are requested */
1941 }
1942 else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1944 rb_num_t i = 0, j;
1945
1946 if (len < num) {
1947 for (i = 0; i < num - len; i++) {
1948 *cfp->sp++ = Qnil;
1949 }
1950 }
1951
1952 for (j = 0; i < num; i++, j++) {
1953 VALUE v = ptr[len - j - 1];
1954 *cfp->sp++ = v;
1955 }
1956
1957 if (is_splat) {
1958 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1959 }
1960 }
1961 else {
1962 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1963 if (is_splat) {
1964 if (num > len) {
1965 *cfp->sp++ = rb_ary_new();
1966 }
1967 else {
1968 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1969 }
1970 }
1971
1972 if (num > len) {
1973 rb_num_t i = 0;
1974 for (; i < num - len; i++) {
1975 *cfp->sp++ = Qnil;
1976 }
1977
1978 for (rb_num_t j = 0; i < num; i++, j++) {
1979 *cfp->sp++ = ptr[len - j - 1];
1980 }
1981 }
1982 else {
1983 for (rb_num_t j = 0; j < num; j++) {
1984 *cfp->sp++ = ptr[num - j - 1];
1985 }
1986 }
1987 }
1988
1989 RB_GC_GUARD(ary);
1990}
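/* Flag bits, for illustration: 0x01 requests a trailing splat and 0x02
 * selects the "post" order. E.g. `a, b = ary` expands with num=2/flag=0,
 * `a, *b = ary` with num=1/flag=0x01, and `*a, b = ary` with
 * num=1/flag=0x03, taking the post branch above. */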
1991
1992static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1993
1994static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1995
1996static struct rb_class_cc_entries *
1997vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1998{
1999 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
2000#if VM_CHECK_MODE > 0
2001 ccs->debug_sig = ~(VALUE)ccs;
2002#endif
2003 ccs->capa = 0;
2004 ccs->len = 0;
2005 ccs->cme = cme;
2006 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2007 ccs->entries = NULL;
2008
2009 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2010 RB_OBJ_WRITTEN(klass, Qundef, cme);
2011 return ccs;
2012}
2013
2014static void
2015vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2016{
2017 if (! vm_cc_markable(cc)) {
2018 return;
2019 }
2020
2021 if (UNLIKELY(ccs->len == ccs->capa)) {
2022 if (ccs->capa == 0) {
2023 ccs->capa = 1;
2024 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2025 }
2026 else {
2027 ccs->capa *= 2;
2028 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2029 }
2030 }
2031 VM_ASSERT(ccs->len < ccs->capa);
2032
2033 const int pos = ccs->len++;
2034 ccs->entries[pos].argc = vm_ci_argc(ci);
2035 ccs->entries[pos].flag = vm_ci_flag(ci);
2036 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2037
2038 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2039 // for tuning
2040 // vm_mtbl_dump(klass, 0);
2041 }
2042}
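/* A ccs groups every call cache for one (class, mid) pair; its entries are
 * distinguished only by call-site argc/flag, so e.g. `obj.m(1)` and
 * `obj.m(*ary)` at different call sites get two entries under one ccs. */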
2043
2044#if VM_CHECK_MODE > 0
2045void
2046rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2047{
2048 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2049 for (int i=0; i<ccs->len; i++) {
2050 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2051 ccs->entries[i].flag,
2052 ccs->entries[i].argc);
2053 rp(ccs->entries[i].cc);
2054 }
2055}
2056
2057static int
2058vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2059{
2060 VM_ASSERT(vm_ccs_p(ccs));
2061 VM_ASSERT(ccs->len <= ccs->capa);
2062
2063 for (int i=0; i<ccs->len; i++) {
2064 const struct rb_callcache *cc = ccs->entries[i].cc;
2065
2066 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2067 VM_ASSERT(vm_cc_class_check(cc, klass));
2068 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2069 VM_ASSERT(!vm_cc_super_p(cc));
2070 VM_ASSERT(!vm_cc_refinement_p(cc));
2071 }
2072 return TRUE;
2073}
2074#endif
2075
2076const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2077
2078static const struct rb_callcache *
2079vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2080{
2081 const ID mid = vm_ci_mid(ci);
2082 struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2083 struct rb_class_cc_entries *ccs = NULL;
2084 VALUE ccs_data;
2085
2086 if (cc_tbl) {
2087 // CCS data is keyed on method id, so we don't need the method id
2088 // for doing comparisons in the `for` loop below.
2089 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2090 ccs = (struct rb_class_cc_entries *)ccs_data;
2091 const int ccs_len = ccs->len;
2092
2093 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2094 rb_vm_ccs_free(ccs);
2095 rb_id_table_delete(cc_tbl, mid);
2096 ccs = NULL;
2097 }
2098 else {
2099 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2100
2101 // We already know the method id is correct because we had
2102 // to look up the ccs_data by method id. All we need to
2103 // compare is argc and flag
2104 unsigned int argc = vm_ci_argc(ci);
2105 unsigned int flag = vm_ci_flag(ci);
2106
2107 for (int i=0; i<ccs_len; i++) {
2108 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2109 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2110 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2111
2112 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2113
2114 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2115 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2116
2117 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2118 VM_ASSERT(ccs_cc->klass == klass);
2119 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2120
2121 return ccs_cc;
2122 }
2123 }
2124 }
2125 }
2126 }
2127 else {
2128 cc_tbl = rb_id_table_create(2);
2129 RCLASS_WRITE_CC_TBL(klass, cc_tbl);
2130 }
2131
2132 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2133
2134 const rb_callable_method_entry_t *cme;
2135
2136 if (ccs) {
2137 cme = ccs->cme;
2138 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2139
2140 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2141 }
2142 else {
2143 cme = rb_callable_method_entry(klass, mid);
2144 }
2145
2146 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2147
2148 if (cme == NULL) {
2149 // undef or not found: can't cache the information
2150 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2151 return &vm_empty_cc;
2152 }
2153
2154 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2155
2156 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2157
2158 if (ccs == NULL) {
2159 VM_ASSERT(cc_tbl != NULL);
2160
2161 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2162 // rb_callable_method_entry() prepares ccs.
2163 ccs = (struct rb_class_cc_entries *)ccs_data;
2164 }
2165 else {
2166 // TODO: required?
2167 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2168 }
2169 }
2170
2171 cme = rb_check_overloaded_cme(cme, ci);
2172
2173 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2174 vm_ccs_push(klass, ccs, ci, cc);
2175
2176 VM_ASSERT(vm_cc_cme(cc) != NULL);
2177 VM_ASSERT(cme->called_id == mid);
2178 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2179
2180 return cc;
2181}
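/* For illustration: the cache is two-level. cc_tbl maps a method id to one
 * ccs per class; the ccs holds one call cache per distinct call shape
 * (argc, flag). A rough trace for `obj.foo(1, 2)`:
 *
 *   rb_id_table_lookup(cc_tbl, :foo)          -> ccs
 *   scan entries for argc == 2, flag matching -> hit:  return cached cc
 *                                             -> miss: vm_cc_new + vm_ccs_push
 */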
2182
2183const struct rb_callcache *
2184rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2185{
2186 const struct rb_callcache *cc;
2187
2188 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2189
2190 RB_VM_LOCK_ENTER();
2191 {
2192 cc = vm_search_cc(klass, ci);
2193
2194 VM_ASSERT(cc);
2195 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2196 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2197 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2198 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2199 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2200 }
2201 RB_VM_LOCK_LEAVE();
2202
2203 return cc;
2204}
2205
2206static const struct rb_callcache *
2207vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2208{
2209#if USE_DEBUG_COUNTER
2210 const struct rb_callcache *old_cc = cd->cc;
2211#endif
2212
2213 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2214
2215#if OPT_INLINE_METHOD_CACHE
2216 cd->cc = cc;
2217
2218 const struct rb_callcache *empty_cc = &vm_empty_cc;
2219 if (cd_owner && cc != empty_cc) {
2220 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2221 }
2222
2223#if USE_DEBUG_COUNTER
2224 if (!old_cc || old_cc == empty_cc) {
2225 // empty
2226 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2227 }
2228 else if (old_cc == cc) {
2229 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2230 }
2231 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2232 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2233 }
2234 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2235 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2236 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2237 }
2238 else {
2239 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2240 }
2241#endif
2242#endif // OPT_INLINE_METHOD_CACHE
2243
2244 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2245 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2246
2247 return cc;
2248}
2249
2250ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2251static const struct rb_callcache *
2252vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2253{
2254 const struct rb_callcache *cc = cd->cc;
2255
2256#if OPT_INLINE_METHOD_CACHE
2257 if (LIKELY(vm_cc_class_check(cc, klass))) {
2258 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2259 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2260 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2261 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2262 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2263 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2264
2265 return cc;
2266 }
2267 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2268 }
2269 else {
2270 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2271 }
2272#endif
2273
2274 return vm_search_method_slowpath0(cd_owner, cd, klass);
2275}
2276
2277static const struct rb_callcache *
2278vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2279{
2280 VALUE klass = CLASS_OF(recv);
2281 VM_ASSERT(klass != Qfalse);
2282 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2283
2284 return vm_search_method_fastpath(cd_owner, cd, klass);
2285}
2286
2287#if __has_attribute(transparent_union)
2288typedef union {
2289 VALUE (*anyargs)(ANYARGS);
2290 VALUE (*f00)(VALUE);
2291 VALUE (*f01)(VALUE, VALUE);
2292 VALUE (*f02)(VALUE, VALUE, VALUE);
2293 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2294 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2295 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2296 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2297 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2298    VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2299    VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300    VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301    VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302    VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303    VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2304    VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2305    VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2306    VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2307} __attribute__((__transparent_union__)) cfunc_type;
2308# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2309#else
2310typedef VALUE (*cfunc_type)(ANYARGS);
2311# define make_cfunc_type(f) (cfunc_type)(f)
2312#endif
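/* For illustration: with transparent_union the callers below can pass any of
 * the listed function-pointer shapes without a cast, e.g.
 *
 *   check_cfunc(me, rb_obj_equal);   // VALUE (*)(VALUE, VALUE) binds to f01
 *
 * Without the attribute, everything funnels through the single
 * VALUE (*)(ANYARGS) type via make_cfunc_type's cast.
 */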
2313
2314static inline int
2315check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2316{
2317 if (! me) {
2318 return false;
2319 }
2320 else {
2321 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2322 VM_ASSERT(callable_method_entry_p(me));
2323 VM_ASSERT(me->def);
2324 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2325 return false;
2326 }
2327 else {
2328#if __has_attribute(transparent_union)
2329 return me->def->body.cfunc.func == func.anyargs;
2330#else
2331 return me->def->body.cfunc.func == func;
2332#endif
2333 }
2334 }
2335}
2336
2337static inline int
2338check_method_basic_definition(const rb_callable_method_entry_t *me)
2339{
2340 return me && METHOD_ENTRY_BASIC(me);
2341}
2342
2343static inline int
2344vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2345{
2346 VM_ASSERT(iseq != NULL);
2347 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2348 return check_cfunc(vm_cc_cme(cc), func);
2349}
2350
2351#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2352#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2353
2354#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2355
2356static inline bool
2357FIXNUM_2_P(VALUE a, VALUE b)
2358{
2359 /* FIXNUM_P(a) && FIXNUM_P(b)
2360 * == ((a & 1) && (b & 1))
2361 * == a & b & 1 */
2362 SIGNED_VALUE x = a;
2363 SIGNED_VALUE y = b;
2364 SIGNED_VALUE z = x & y & 1;
2365 return z == 1;
2366}
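/* Worked example: Fixnums are tagged with the low bit set, so INT2FIX(1) is
 * (1 << 1) | 1 == 0b11. For a == b == 0b11, a & b & 1 == 1 (both Fixnums);
 * any heap object pointer has bit 0 clear and forces the result to 0.
 */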
2367
2368static inline bool
2369FLONUM_2_P(VALUE a, VALUE b)
2370{
2371#if USE_FLONUM
2372 /* FLONUM_P(a) && FLONUM_P(b)
2373 * == ((a & 3) == 2) && ((b & 3) == 2)
2374     * == ! (((a ^ 2) | (b ^ 2)) & 3)
2375 */
2376 SIGNED_VALUE x = a;
2377 SIGNED_VALUE y = b;
2378 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2379 return !z;
2380#else
2381 return false;
2382#endif
2383}
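/* Worked example: flonums carry the tag 0b10 in the low two bits, so x ^ 2
 * clears those bits only for a flonum. If either operand is not a flonum,
 * ((x ^ 2) | (y ^ 2)) & 3 is nonzero and the test fails.
 */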
2384
2385static VALUE
2386opt_equality_specialized(VALUE recv, VALUE obj)
2387{
2388 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2389 goto compare_by_identity;
2390 }
2391 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2392 goto compare_by_identity;
2393 }
2394 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2395 goto compare_by_identity;
2396 }
2397 else if (SPECIAL_CONST_P(recv)) {
2398        // other special constants: no fast path, fall through to Qundef
2399 }
2400 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2401 double a = RFLOAT_VALUE(recv);
2402 double b = RFLOAT_VALUE(obj);
2403
2404#if MSC_VERSION_BEFORE(1300)
2405 if (isnan(a)) {
2406 return Qfalse;
2407 }
2408 else if (isnan(b)) {
2409 return Qfalse;
2410 }
2411 else
2412#endif
2413 return RBOOL(a == b);
2414 }
2415 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2416 if (recv == obj) {
2417 return Qtrue;
2418 }
2419 else if (RB_TYPE_P(obj, T_STRING)) {
2420 return rb_str_eql_internal(obj, recv);
2421 }
2422 }
2423 return Qundef;
2424
2425 compare_by_identity:
2426 return RBOOL(recv == obj);
2427}
2428
2429static VALUE
2430opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2431{
2432 VM_ASSERT(cd_owner != NULL);
2433
2434 VALUE val = opt_equality_specialized(recv, obj);
2435 if (!UNDEF_P(val)) return val;
2436
2437 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2438 return Qundef;
2439 }
2440 else {
2441 return RBOOL(recv == obj);
2442 }
2443}
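/* For illustration: the fast path is only sound while `==` keeps its default
 * meaning, which is tracked per basic-operation redefinition flag:
 *
 *   1 == 1           # BOP_EQ for Integer unredefined -> identity compare
 *   class Integer; def ==(o) = true; end
 *   1 == 2           # flag now set -> opt_equality returns Qundef and the
 *                    # generic call path dispatches Integer#==
 */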
2444
2445#undef EQ_UNREDEFINED_P
2446
2447static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2448NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2449
2450static VALUE
2451opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2452{
2453 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2454
2455 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2456 return RBOOL(recv == obj);
2457 }
2458 else {
2459 return Qundef;
2460 }
2461}
2462
2463static VALUE
2464opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2465{
2466 VALUE val = opt_equality_specialized(recv, obj);
2467 if (!UNDEF_P(val)) {
2468 return val;
2469 }
2470 else {
2471 return opt_equality_by_mid_slowpath(recv, obj, mid);
2472 }
2473}
2474
2475VALUE
2476rb_equal_opt(VALUE obj1, VALUE obj2)
2477{
2478 return opt_equality_by_mid(obj1, obj2, idEq);
2479}
2480
2481VALUE
2482rb_eql_opt(VALUE obj1, VALUE obj2)
2483{
2484 return opt_equality_by_mid(obj1, obj2, idEqlP);
2485}
2486
2487extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2488extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2489
2490static VALUE
2491check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2492{
2493 switch (type) {
2494 case VM_CHECKMATCH_TYPE_WHEN:
2495 return pattern;
2496 case VM_CHECKMATCH_TYPE_RESCUE:
2497 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2498 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2499 }
2500 /* fall through */
2501 case VM_CHECKMATCH_TYPE_CASE: {
2502 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2503 }
2504 default:
2505 rb_bug("check_match: unreachable");
2506 }
2507}
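/* For illustration: `case`/`when` and `rescue` both funnel through here.
 * VM_CHECKMATCH_TYPE_WHEN returns the pattern unchanged (the branch test
 * happens elsewhere), while the other two dispatch pattern === target:
 *
 *   case x when Integer then ... end  # => Integer === x (with refinements)
 *   begin ... rescue Foo ... end      # TypeError unless Foo is a class or
 *                                     # module
 */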
2508
2509
2510#if MSC_VERSION_BEFORE(1300)
2511#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2512#else
2513#define CHECK_CMP_NAN(a, b) /* do nothing */
2514#endif
2515
2516static inline VALUE
2517double_cmp_lt(double a, double b)
2518{
2519 CHECK_CMP_NAN(a, b);
2520 return RBOOL(a < b);
2521}
2522
2523static inline VALUE
2524double_cmp_le(double a, double b)
2525{
2526 CHECK_CMP_NAN(a, b);
2527 return RBOOL(a <= b);
2528}
2529
2530static inline VALUE
2531double_cmp_gt(double a, double b)
2532{
2533 CHECK_CMP_NAN(a, b);
2534 return RBOOL(a > b);
2535}
2536
2537static inline VALUE
2538double_cmp_ge(double a, double b)
2539{
2540 CHECK_CMP_NAN(a, b);
2541 return RBOOL(a >= b);
2542}
2543
2544// Copied by vm_dump.c
2545static inline VALUE *
2546vm_base_ptr(const rb_control_frame_t *cfp)
2547{
2548 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2549
2550 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2551 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2552
2553 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2554 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2555 int params = ISEQ_BODY(cfp->iseq)->param.size;
2556
2557 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2558 bp += vm_ci_argc(ci);
2559 }
2560
2561 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2562 /* adjust `self' */
2563 bp += 1;
2564 }
2565#if VM_DEBUG_BP_CHECK
2566 if (bp != cfp->bp_check) {
2567 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2568 (long)(cfp->bp_check - GET_EC()->vm_stack),
2569 (long)(bp - GET_EC()->vm_stack));
2570 rb_bug("vm_base_ptr: unreachable");
2571 }
2572#endif
2573 return bp;
2574 }
2575 else {
2576 return NULL;
2577 }
2578}
2579
2580VALUE *
2581rb_vm_base_ptr(const rb_control_frame_t *cfp)
2582{
2583 return vm_base_ptr(cfp);
2584}
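/* Rough frame sketch: for a Ruby-level frame, the region above prev_cfp->sp
 * holds the locals (local_table_size slots) followed by VM_ENV_DATA_SIZE
 * slots of environment data (me/cref, specval, frame type); bp then marks
 * the base of this frame's working value stack. Method frames reserve one
 * extra slot for `self`, hence the `bp += 1` above.
 */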
2585
2586/* method call processes with call_info */
2587
2588#include "vm_args.c"
2589
2590static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2591ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2592static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2593static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2594static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2595static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2596static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2597
2598static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2599
2600static VALUE
2601vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2602{
2603 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2604
2605 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2606}
2607
2608static VALUE
2609vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2610{
2611 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2612
2613 const struct rb_callcache *cc = calling->cc;
2614 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2615 int param = ISEQ_BODY(iseq)->param.size;
2616 int local = ISEQ_BODY(iseq)->local_table_size;
2617 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2618}
2619
2620bool
2621rb_simple_iseq_p(const rb_iseq_t *iseq)
2622{
2623 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2624 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2627 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2628 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2629 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2630 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2631}
2632
2633bool
2634rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2635{
2636 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2637 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2638 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2639 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2640 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2641 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2642 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2643 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2644}
2645
2646bool
2647rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2648{
2649 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2650 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2651 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2652 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2653 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2654 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2655 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2656}
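/* For illustration, how these predicates classify Ruby signatures:
 *
 *   def m(a, b)         end   # rb_simple_iseq_p        -> true
 *   def m(a, b = 1)     end   # rb_iseq_only_optparam_p -> true
 *   def m(a, k: 1)      end   # rb_iseq_only_kwparam_p  -> true
 *   def m(a, *r, &blk)  end   # rest/block disqualify all three
 */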
2657
2658#define ALLOW_HEAP_ARGV (-2)
2659#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2660
2661static inline bool
2662vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2663{
2664 vm_check_canary(GET_EC(), cfp->sp);
2665 bool ret = false;
2666
2667 if (!NIL_P(ary)) {
2668 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2669 long len = RARRAY_LEN(ary);
2670 int argc = calling->argc;
2671
2672 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2673 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2674             * a temporary array, instead of trying to keep the arguments on the VM stack.
2675 */
2676 VALUE *argv = cfp->sp - argc;
2677 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2678 rb_ary_cat(argv_ary, argv, argc);
2679 rb_ary_cat(argv_ary, ptr, len);
2680 cfp->sp -= argc - 1;
2681 cfp->sp[-1] = argv_ary;
2682 calling->argc = 1;
2683 calling->heap_argv = argv_ary;
2684 RB_GC_GUARD(ary);
2685 }
2686 else {
2687 long i;
2688
2689 if (max_args >= 0 && len + argc > max_args) {
2690 /* If only a given max_args is allowed, copy up to max args.
2691 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2692 * where additional arguments are ignored.
2693 *
2694 * Also, copy up to one more argument than the maximum,
2695 * in case it is an empty keyword hash that will be removed.
2696 */
2697 calling->argc += len - (max_args - argc + 1);
2698 len = max_args - argc + 1;
2699 ret = true;
2700 }
2701 else {
2702 /* Unset heap_argv if set originally. Can happen when
2703 * forwarding modified arguments, where heap_argv was used
2704             * originally, but heap_argv is not supported by the forwarded
2705 * method in all cases.
2706 */
2707 calling->heap_argv = 0;
2708 }
2709 CHECK_VM_STACK_OVERFLOW(cfp, len);
2710
2711 for (i = 0; i < len; i++) {
2712 *cfp->sp++ = ptr[i];
2713 }
2714 calling->argc += i;
2715 }
2716 }
2717
2718 return ret;
2719}
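/* For illustration: for `f(*a)` with a very large array, keeping every
 * element on the VM stack could overflow it, so the elements are parked in
 * one hidden Array (heap_argv) occupying a single stack slot:
 *
 *   a = Array.new(200_000, 0)
 *   f(*a)   # calling->argc collapses to 1; heap_argv holds the elements
 */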
2720
2721static inline void
2722vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2723{
2724 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2725 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2726 const VALUE h = rb_hash_new_with_size(kw_len);
2727 VALUE *sp = cfp->sp;
2728 int i;
2729
2730 for (i=0; i<kw_len; i++) {
2731 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2732 }
2733 (sp-kw_len)[0] = h;
2734
2735 cfp->sp -= kw_len - 1;
2736 calling->argc -= kw_len - 1;
2737 calling->kw_splat = 1;
2738}
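/* Rough stack transition for `f(k1: 1, k2: 2)`: the kw_len trailing values
 * are folded into one Hash, shrinking the stack by kw_len - 1 slots:
 *
 *   before: ... recv | 1 | 2             (kwarg names live in the callinfo)
 *   after:  ... recv | {k1: 1, k2: 2}    (calling->kw_splat = 1)
 */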
2739
2740static inline VALUE
2741vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2742{
2743 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2744 if (keyword_hash != Qnil) {
2745 /* Convert a non-hash keyword splat to a new hash */
2746 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2747 }
2748 }
2749 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2750 /* Convert a hash keyword splat to a new hash unless
2751 * a mutable keyword splat was passed.
2752 * Skip allocating new hash for empty keyword splat, as empty
2753 * keyword splat will be ignored by both callers.
2754 */
2755 keyword_hash = rb_hash_dup(keyword_hash);
2756 }
2757 return keyword_hash;
2758}
2759
2760static inline void
2761CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2762 struct rb_calling_info *restrict calling,
2763 const struct rb_callinfo *restrict ci, int max_args)
2764{
2765 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2766 if (IS_ARGS_KW_SPLAT(ci)) {
2767 // f(*a, **kw)
2768 VM_ASSERT(calling->kw_splat == 1);
2769
2770 cfp->sp -= 2;
2771 calling->argc -= 2;
2772 VALUE ary = cfp->sp[0];
2773 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2774
2775 // splat a
2776 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2777
2778 // put kw
2779 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2780 if (UNLIKELY(calling->heap_argv)) {
2781 rb_ary_push(calling->heap_argv, kwh);
2782 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2783 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2784 calling->kw_splat = 0;
2785 }
2786 }
2787 else {
2788 cfp->sp[0] = kwh;
2789 cfp->sp++;
2790 calling->argc++;
2791
2792 VM_ASSERT(calling->kw_splat == 1);
2793 }
2794 }
2795 else {
2796 calling->kw_splat = 0;
2797 }
2798 }
2799 else {
2800 // f(*a)
2801 VM_ASSERT(calling->kw_splat == 0);
2802
2803 cfp->sp -= 1;
2804 calling->argc -= 1;
2805 VALUE ary = cfp->sp[0];
2806
2807 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2808 goto check_keyword;
2809 }
2810
2811 // check the last argument
2812 VALUE last_hash, argv_ary;
2813 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2814 if (!IS_ARGS_KEYWORD(ci) &&
2815 RARRAY_LEN(argv_ary) > 0 &&
2816 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2817 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2818
2819 rb_ary_pop(argv_ary);
2820 if (!RHASH_EMPTY_P(last_hash)) {
2821 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2822 calling->kw_splat = 1;
2823 }
2824 }
2825 }
2826 else {
2827check_keyword:
2828 if (!IS_ARGS_KEYWORD(ci) &&
2829 calling->argc > 0 &&
2830 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2831 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2832
2833 if (RHASH_EMPTY_P(last_hash)) {
2834 calling->argc--;
2835 cfp->sp -= 1;
2836 }
2837 else {
2838 cfp->sp[-1] = rb_hash_dup(last_hash);
2839 calling->kw_splat = 1;
2840 }
2841 }
2842 }
2843 }
2844 }
2845 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2846 // f(**kw)
2847 VM_ASSERT(calling->kw_splat == 1);
2848 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2849
2850 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2851 cfp->sp--;
2852 calling->argc--;
2853 calling->kw_splat = 0;
2854 }
2855 else {
2856 cfp->sp[-1] = kwh;
2857 }
2858 }
2859 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2860 // f(k1:1, k2:2)
2861 VM_ASSERT(calling->kw_splat == 0);
2862
2863 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2864 * by creating a keyword hash.
2865 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2866 */
2867 vm_caller_setup_arg_kw(cfp, calling, ci);
2868 }
2869}
2870
2871#define USE_OPT_HIST 0
2872
2873#if USE_OPT_HIST
2874#define OPT_HIST_MAX 64
2875static int opt_hist[OPT_HIST_MAX+1];
2876
2877__attribute__((destructor))
2878static void
2879opt_hist_show_results_at_exit(void)
2880{
2881 for (int i=0; i<OPT_HIST_MAX; i++) {
2882 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2883 }
2884}
2885#endif
2886
2887static VALUE
2888vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2889 struct rb_calling_info *calling)
2890{
2891 const struct rb_callcache *cc = calling->cc;
2892 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2893 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2894 const int opt = calling->argc - lead_num;
2895 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2896 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2897 const int param = ISEQ_BODY(iseq)->param.size;
2898 const int local = ISEQ_BODY(iseq)->local_table_size;
2899 const int delta = opt_num - opt;
2900
2901 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2902
2903#if USE_OPT_HIST
2904 if (opt_pc < OPT_HIST_MAX) {
2905 opt_hist[opt]++;
2906 }
2907 else {
2908 opt_hist[OPT_HIST_MAX]++;
2909 }
2910#endif
2911
2912 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2913}
2914
2915static VALUE
2916vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2917 struct rb_calling_info *calling)
2918{
2919 const struct rb_callcache *cc = calling->cc;
2920 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2921 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2922 const int opt = calling->argc - lead_num;
2923 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2924
2925 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2926
2927#if USE_OPT_HIST
2928 if (opt_pc < OPT_HIST_MAX) {
2929 opt_hist[opt]++;
2930 }
2931 else {
2932 opt_hist[OPT_HIST_MAX]++;
2933 }
2934#endif
2935
2936 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2937}
2938
2939static void
2940args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2941 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2942 VALUE *const locals);
2943
2944static VALUE
2945vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2946 struct rb_calling_info *calling)
2947{
2948 const struct rb_callcache *cc = calling->cc;
2949 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2950 int param_size = ISEQ_BODY(iseq)->param.size;
2951 int local_size = ISEQ_BODY(iseq)->local_table_size;
2952
2953 // Setting up local size and param size
2954 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2955
2956 local_size = local_size + vm_ci_argc(calling->cd->ci);
2957 param_size = param_size + vm_ci_argc(calling->cd->ci);
2958
2959 cfp->sp[0] = (VALUE)calling->cd->ci;
2960
2961 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2962}
2963
2964static VALUE
2965vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2966 struct rb_calling_info *calling)
2967{
2968 const struct rb_callinfo *ci = calling->cd->ci;
2969 const struct rb_callcache *cc = calling->cc;
2970
2971 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2972 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2973
2974 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2975 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2976 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2977 const int ci_kw_len = kw_arg->keyword_len;
2978 const VALUE * const ci_keywords = kw_arg->keywords;
2979 VALUE *argv = cfp->sp - calling->argc;
2980 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2981 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2982 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2983 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2984 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2985
2986 int param = ISEQ_BODY(iseq)->param.size;
2987 int local = ISEQ_BODY(iseq)->local_table_size;
2988 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2989}
2990
2991static VALUE
2992vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2993 struct rb_calling_info *calling)
2994{
2995 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2996 const struct rb_callcache *cc = calling->cc;
2997
2998 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2999 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3000
3001 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3002 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3003 VALUE * const argv = cfp->sp - calling->argc;
3004 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3005
3006 int i;
3007 for (i=0; i<kw_param->num; i++) {
3008 klocals[i] = kw_param->default_values[i];
3009 }
3010 klocals[i] = INT2FIX(0); // kw specify flag
3011 // NOTE:
3012    // nobody checks this value, but it should be cleared because it can
3013    // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
3014
3015 int param = ISEQ_BODY(iseq)->param.size;
3016 int local = ISEQ_BODY(iseq)->local_table_size;
3017 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3018}
3019
3020static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3021
3022static VALUE
3023vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3024 struct rb_calling_info *calling)
3025{
3026 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3027 cfp->sp -= (calling->argc + 1);
3028 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3029 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3030}
3031
3032VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3033
3034static void
3035warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3036{
3037 rb_vm_t *vm = GET_VM();
3038 set_table *dup_check_table = vm->unused_block_warning_table;
3039 st_data_t key;
3040 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3041
3042 union {
3043 VALUE v;
3044 unsigned char b[SIZEOF_VALUE];
3045 } k1 = {
3046 .v = (VALUE)pc,
3047 }, k2 = {
3048 .v = (VALUE)cme->def,
3049 };
3050
3051    // relaxed check: key on the method id only
3052 if (!strict_unused_block) {
3053 key = (st_data_t)cme->def->original_id;
3054
3055 if (set_lookup(dup_check_table, key)) {
3056 return;
3057 }
3058 }
3059
3060 // strict check
3061    // make a unique key by mixing the pc and the me->def pointer
3062 key = 0;
3063 for (int i=0; i<SIZEOF_VALUE; i++) {
3064 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3065 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3066 }
3067
3068 if (0) {
3069 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3070 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3071 fprintf(stderr, "key:%p\n", (void *)key);
3072 }
3073
3074 // duplication check
3075 if (set_insert(dup_check_table, key)) {
3076 // already shown
3077 }
3078 else if (RTEST(ruby_verbose) || strict_unused_block) {
3079 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3080 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3081
3082 if (!NIL_P(m_loc)) {
3083 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3084 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3085 }
3086 else {
3087 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3088 }
3089 }
3090}
3091
3092static inline int
3093vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3094 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3095{
3096 const struct rb_callinfo *ci = calling->cd->ci;
3097 const struct rb_callcache *cc = calling->cc;
3098
3099 VM_ASSERT((vm_ci_argc(ci), 1));
3100 VM_ASSERT(vm_cc_cme(cc) != NULL);
3101
3102 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3103 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3104 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3105 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3106 }
3107
3108 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3109 if (LIKELY(rb_simple_iseq_p(iseq))) {
3110 rb_control_frame_t *cfp = ec->cfp;
3111 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3112 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3113
3114 if (calling->argc != lead_num) {
3115 argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3116 }
3117
3118 //VM_ASSERT(ci == calling->cd->ci);
3119 VM_ASSERT(cc == calling->cc);
3120
3121 if (vm_call_iseq_optimizable_p(ci, cc)) {
3122 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3123 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3124 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3125 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3126 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3127 }
3128 else {
3129 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3130 }
3131 }
3132 return 0;
3133 }
3134 else if (rb_iseq_only_optparam_p(iseq)) {
3135 rb_control_frame_t *cfp = ec->cfp;
3136
3137 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3138 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3139
3140 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3141 const int argc = calling->argc;
3142 const int opt = argc - lead_num;
3143
3144 if (opt < 0 || opt > opt_num) {
3145 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3146 }
3147
3148 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3149 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3150 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3151 vm_call_cacheable(ci, cc));
3152 }
3153 else {
3154 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3155 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3156 vm_call_cacheable(ci, cc));
3157 }
3158
3159 /* initialize opt vars for self-references */
3160 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3161 for (int i=argc; i<lead_num + opt_num; i++) {
3162 argv[i] = Qnil;
3163 }
3164 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3165 }
3166 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3167 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3168 const int argc = calling->argc;
3169 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3170
3171 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3172 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3173
3174 if (argc - kw_arg->keyword_len == lead_num) {
3175 const int ci_kw_len = kw_arg->keyword_len;
3176 const VALUE * const ci_keywords = kw_arg->keywords;
3177 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3178 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3179
3180 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3181 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3182
3183 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3184 vm_call_cacheable(ci, cc));
3185
3186 return 0;
3187 }
3188 }
3189 else if (argc == lead_num) {
3190 /* no kwarg */
3191 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3192 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3193
3194 if (klocals[kw_param->num] == INT2FIX(0)) {
3195 /* copy from default_values */
3196 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3197 vm_call_cacheable(ci, cc));
3198 }
3199
3200 return 0;
3201 }
3202 }
3203 }
3204
3205 // Called iseq is using ... param
3206 // def foo(...) # <- iseq for foo will have "forwardable"
3207 //
3208 // We want to set the `...` local to the caller's CI
3209 // foo(1, 2) # <- the ci for this should end up as `...`
3210 //
3211 // So hopefully the stack looks like:
3212 //
3213 // => 1
3214 // => 2
3215 // => *
3216 // => **
3217 // => &
3218 // => ... # <- points at `foo`s CI
3219 // => cref_or_me
3220 // => specval
3221 // => type
3222 //
3223 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3224 bool can_fastpath = true;
3225
3226 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3227 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3228 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3229 ci = vm_ci_new_runtime(
3230 vm_ci_mid(ci),
3231 vm_ci_flag(ci),
3232 vm_ci_argc(ci),
3233 vm_ci_kwarg(ci));
3234 }
3235 else {
3236 ci = forward_cd->caller_ci;
3237 }
3238 can_fastpath = false;
3239 }
3240 // C functions calling iseqs will stack allocate a CI,
3241 // so we need to convert it to heap allocated
3242 if (!vm_ci_markable(ci)) {
3243 ci = vm_ci_new_runtime(
3244 vm_ci_mid(ci),
3245 vm_ci_flag(ci),
3246 vm_ci_argc(ci),
3247 vm_ci_kwarg(ci));
3248 can_fastpath = false;
3249 }
3250 argv[param_size - 1] = (VALUE)ci;
3251 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3252 return 0;
3253 }
3254
3255 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3256}
3257
3258static void
3259vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3260{
3261 // This case is when the caller is using a ... parameter.
3262 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3263 // In this case the caller's caller's CI will be on the stack.
3264 //
3265 // For example:
3266 //
3267 // def bar(a, b); a + b; end
3268 // def foo(...); bar(...); end
3269 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3270 //
3271 // Stack layout will be:
3272 //
3273 // > 1
3274 // > 2
3275 // > CI for foo(1, 2)
3276 // > cref_or_me
3277 // > specval
3278 // > type
3279 // > receiver
3280 // > CI for foo(1, 2), via `getlocal ...`
3281 // > ( SP points here )
3282 const VALUE * lep = VM_CF_LEP(cfp);
3283
3284 const rb_iseq_t *iseq;
3285
3286 // If we're in an escaped environment (lambda for example), get the iseq
3287 // from the captured env.
3288 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3289 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3290 iseq = env->iseq;
3291 }
3292 else { // Otherwise use the lep to find the caller
3293 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3294 }
3295
3296 // Our local storage is below the args we need to copy
3297 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3298
3299 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3300 VALUE * to = cfp->sp - 1; // clobber the CI
3301
3302 if (RTEST(splat)) {
3303 to -= 1; // clobber the splat array
3304 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3305 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3306 to += RARRAY_LEN(splat);
3307 }
3308
3309 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3310 MEMCPY(to, from, VALUE, argc);
3311 cfp->sp = to + argc;
3312
3313 // Stack layout should now be:
3314 //
3315 // > 1
3316 // > 2
3317 // > CI for foo(1, 2)
3318 // > cref_or_me
3319 // > specval
3320 // > type
3321 // > receiver
3322 // > 1
3323 // > 2
3324 // > ( SP points here )
3325}
3326
3327static VALUE
3328vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3329{
3330 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3331
3332 const struct rb_callcache *cc = calling->cc;
3333 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3334 int param_size = ISEQ_BODY(iseq)->param.size;
3335 int local_size = ISEQ_BODY(iseq)->local_table_size;
3336
3337 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3338
3339 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3340 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3341}
3342
3343static VALUE
3344vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3345{
3346 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3347
3348 const struct rb_callcache *cc = calling->cc;
3349 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3350 int param_size = ISEQ_BODY(iseq)->param.size;
3351 int local_size = ISEQ_BODY(iseq)->local_table_size;
3352
3353 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3354
3355 // Setting up local size and param size
3356 local_size = local_size + vm_ci_argc(calling->cd->ci);
3357 param_size = param_size + vm_ci_argc(calling->cd->ci);
3358
3359 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3360 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3361}
3362
3363static inline VALUE
3364vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3365 int opt_pc, int param_size, int local_size)
3366{
3367 const struct rb_callinfo *ci = calling->cd->ci;
3368 const struct rb_callcache *cc = calling->cc;
3369
3370 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3371 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3372 }
3373 else {
3374 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3375 }
3376}
3377
3378static inline VALUE
3379vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3380 int opt_pc, int param_size, int local_size)
3381{
3382 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3383 VALUE *argv = cfp->sp - calling->argc;
3384 VALUE *sp = argv + param_size;
3385 cfp->sp = argv - 1 /* recv */;
3386
3387 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3388 calling->block_handler, (VALUE)me,
3389 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3390 local_size - param_size,
3391 ISEQ_BODY(iseq)->stack_max);
3392 return Qundef;
3393}
3394
3395static inline VALUE
3396vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3397{
3398 const struct rb_callcache *cc = calling->cc;
3399 unsigned int i;
3400 VALUE *argv = cfp->sp - calling->argc;
3401 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3402 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3403 VALUE *src_argv = argv;
3404 VALUE *sp_orig, *sp;
3405 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3406
3407 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3408 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3409 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3410 dst_captured->code.val = src_captured->code.val;
3411 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3412 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3413 }
3414 else {
3415 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3416 }
3417 }
3418
3419 vm_pop_frame(ec, cfp, cfp->ep);
3420 cfp = ec->cfp;
3421
3422 sp_orig = sp = cfp->sp;
3423
3424 /* push self */
3425 sp[0] = calling->recv;
3426 sp++;
3427
3428 /* copy arguments */
3429 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3430 *sp++ = src_argv[i];
3431 }
3432
3433 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3434 calling->recv, calling->block_handler, (VALUE)me,
3435 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3436 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3437 ISEQ_BODY(iseq)->stack_max);
3438
3439 cfp->sp = sp_orig;
3440
3441 return Qundef;
3442}
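/* For illustration: a tailcall pops the caller's frame first (vm_pop_frame)
 * and pushes the callee over it, so `self` and the copied arguments
 * overwrite the old frame. With tailcall optimization enabled at compile
 * time, e.g.
 *
 *   def countdown(n) = n.zero? ? :done : countdown(n - 1)
 *
 * recursion runs in O(1) VM stack depth instead of O(n) frames.
 */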
3443
3444static void
3445ractor_unsafe_check(void)
3446{
3447 if (!rb_ractor_main_p()) {
3448        rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3449 }
3450}
3451
3452static VALUE
3453call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3454{
3455 ractor_unsafe_check();
3456 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3457 return (*f)(recv, rb_ary_new4(argc, argv));
3458}
3459
3460static VALUE
3461call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3462{
3463 ractor_unsafe_check();
3464 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3465 return (*f)(argc, argv, recv);
3466}
3467
3468static VALUE
3469call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3470{
3471 ractor_unsafe_check();
3472 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3473 return (*f)(recv);
3474}
3475
3476static VALUE
3477call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3478{
3479 ractor_unsafe_check();
3480 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3481 return (*f)(recv, argv[0]);
3482}
3483
3484static VALUE
3485call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3486{
3487 ractor_unsafe_check();
3488 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3489 return (*f)(recv, argv[0], argv[1]);
3490}
3491
3492static VALUE
3493call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3494{
3495 ractor_unsafe_check();
3496 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3497 return (*f)(recv, argv[0], argv[1], argv[2]);
3498}
3499
3500static VALUE
3501call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3502{
3503 ractor_unsafe_check();
3504 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3505 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3506}
3507
3508static VALUE
3509call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3510{
3511 ractor_unsafe_check();
3512 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3513 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3514}
3515
3516static VALUE
3517call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3518{
3519 ractor_unsafe_check();
3520    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3521    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3522}
3523
3524static VALUE
3525call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3526{
3527 ractor_unsafe_check();
3528    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3529    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3530}
3531
3532static VALUE
3533call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3534{
3535 ractor_unsafe_check();
3536    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3537    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3538}
3539
3540static VALUE
3541call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3542{
3543 ractor_unsafe_check();
3544    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3545    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3546}
3547
3548static VALUE
3549call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3550{
3551 ractor_unsafe_check();
3552    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3553    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3554}
3555
3556static VALUE
3557call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3558{
3559 ractor_unsafe_check();
3560    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3561    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3562}
3563
3564static VALUE
3565call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3566{
3567 ractor_unsafe_check();
3568    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3569    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3570}
3571
3572static VALUE
3573call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3574{
3575 ractor_unsafe_check();
3576    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3577    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3578}
3579
3580static VALUE
3581call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3582{
3583 ractor_unsafe_check();
3584    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3585    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3586}
3587
3588static VALUE
3589call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3590{
3591 ractor_unsafe_check();
3592    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3593    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3594}
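/* For illustration: the call_cfunc_N family is an arity-indexed invoker
 * table. cfunc->argc selects the invoker when the method is defined: N in
 * 0..15 unpacks argv into N positional C arguments, -1 passes
 * (argc, argv, recv), and -2 passes (recv, args_ary). For example, a method
 * registered with rb_define_method(klass, "m", m_func, 2) is invoked
 * through call_cfunc_2(recv, argc, argv, m_func).
 */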
3595
3596static VALUE
3597ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3598{
3599 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3600 return (*f)(recv, rb_ary_new4(argc, argv));
3601}
3602
3603static VALUE
3604ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3605{
3606 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3607 return (*f)(argc, argv, recv);
3608}
3609
3610static VALUE
3611ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3612{
3613 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3614 return (*f)(recv);
3615}
3616
3617static VALUE
3618ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3619{
3620 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3621 return (*f)(recv, argv[0]);
3622}
3623
3624static VALUE
3625ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3626{
3627 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3628 return (*f)(recv, argv[0], argv[1]);
3629}
3630
3631static VALUE
3632ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3633{
3634 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3635 return (*f)(recv, argv[0], argv[1], argv[2]);
3636}
3637
3638static VALUE
3639ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3640{
3641 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3642 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3643}
3644
3645static VALUE
3646ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3647{
3648 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3649 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3650}
3651
3652static VALUE
3653ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3654{
3655    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3656    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3657}
3658
3659static VALUE
3660ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3661{
3662    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3663    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3664}
3665
3666static VALUE
3667ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3668{
3669    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3670    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3671}
3672
3673static VALUE
3674ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3675{
3676    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3677    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3678}
3679
3680static VALUE
3681ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3682{
3683    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3684    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3685}
3686
3687static VALUE
3688ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3689{
3690    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3691    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3692}
3693
3694static VALUE
3695ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3696{
3697    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3698    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3699}
3700
3701static VALUE
3702ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3703{
3704    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3705    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3706}
3707
3708static VALUE
3709ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3710{
3711    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3712    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3713}
3714
3715static VALUE
3716ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3717{
3718    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3719    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3720}
3721
3722static inline int
3723vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3724{
3725 const int ov_flags = RAISED_STACKOVERFLOW;
3726 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3727 if (rb_ec_raised_p(ec, ov_flags)) {
3728 rb_ec_raised_reset(ec, ov_flags);
3729 return TRUE;
3730 }
3731 return FALSE;
3732}
3733
3734#define CHECK_CFP_CONSISTENCY(func) \
3735 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3736 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3737
3738static inline
3739const rb_method_cfunc_t *
3740vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3741{
3742#if VM_DEBUG_VERIFY_METHOD_CACHE
3743 switch (me->def->type) {
3744 case VM_METHOD_TYPE_CFUNC:
3745 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3746 break;
3747# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3748 METHOD_BUG(ISEQ);
3749 METHOD_BUG(ATTRSET);
3750 METHOD_BUG(IVAR);
3751 METHOD_BUG(BMETHOD);
3752 METHOD_BUG(ZSUPER);
3753 METHOD_BUG(UNDEF);
3754 METHOD_BUG(OPTIMIZED);
3755 METHOD_BUG(MISSING);
3756 METHOD_BUG(REFINED);
3757 METHOD_BUG(ALIAS);
3758# undef METHOD_BUG
3759 default:
3760 rb_bug("wrong method type: %d", me->def->type);
3761 }
3762#endif
3763 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3764}
3765
3766static VALUE
3767vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3768 int argc, VALUE *argv, VALUE *stack_bottom)
3769{
3770 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3771 const struct rb_callinfo *ci = calling->cd->ci;
3772 const struct rb_callcache *cc = calling->cc;
3773 VALUE val;
3774 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3775 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3776
3777 VALUE recv = calling->recv;
3778 VALUE block_handler = calling->block_handler;
3779 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3780
3781 if (UNLIKELY(calling->kw_splat)) {
3782 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3783 }
3784
3785 VM_ASSERT(reg_cfp == ec->cfp);
3786
3787 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3788 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3789
3790 vm_push_frame(ec, NULL, frame_type, recv,
3791 block_handler, (VALUE)me,
3792 0, ec->cfp->sp, 0, 0);
3793
3794 int len = cfunc->argc;
3795 if (len >= 0) rb_check_arity(argc, len, len);
3796
3797 reg_cfp->sp = stack_bottom;
3798 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3799
3800 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3801
3802 rb_vm_pop_frame(ec);
3803
3804 VM_ASSERT(ec->cfp->sp == stack_bottom);
3805
3806 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3807 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3808
3809 return val;
3810}
3811
3812// Push a C method frame for a given cme. This is called when JIT code skipped
3813// pushing a frame but the C method reached a point where a frame is needed.
3814void
3815rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3816{
3817 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3818 rb_execution_context_t *ec = GET_EC();
3819 VALUE *sp = ec->cfp->sp;
3820 VALUE recv = *(sp - recv_idx - 1);
3821 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3822 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3823#if VM_CHECK_MODE > 0
3824 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3825 *(GET_EC()->cfp->sp) = Qfalse;
3826#endif
3827 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3828}
3829
3830// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3831bool
3832rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3833{
3834 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3835}
3836
3837static VALUE
3838vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3839{
3840 int argc = calling->argc;
3841 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3842 VALUE *argv = &stack_bottom[1];
3843
3844 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3845}
3846
3847static VALUE
3848vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3849{
3850 const struct rb_callinfo *ci = calling->cd->ci;
3851 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3852
3853 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3854 VALUE argv_ary;
3855 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3856 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3857 int argc = RARRAY_LENINT(argv_ary);
3858 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3859 VALUE *stack_bottom = reg_cfp->sp - 2;
3860
3861 VM_ASSERT(calling->argc == 1);
3862 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3863 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3864
3865 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3866 }
3867 else {
3868 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3869
3870 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3871 }
3872}
3873
3874static inline VALUE
3875vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3876{
3877 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3878 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3879
3880 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3881 return vm_call_cfunc_other(ec, reg_cfp, calling);
3882 }
3883
3884 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3885 calling->kw_splat = 0;
3886 int i;
3887 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3888 VALUE *sp = stack_bottom;
3889 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3890 for (i = 0; i < argc; i++) {
3891 *++sp = argv[i];
3892 }
3893 reg_cfp->sp = sp+1;
3894
3895 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3896}
3897
3898static inline VALUE
3899vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3900{
3901 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3902 VALUE argv_ary = reg_cfp->sp[-1];
3903 int argc = RARRAY_LENINT(argv_ary);
3904 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3905 VALUE last_hash;
3906 int argc_offset = 0;
3907
3908 if (UNLIKELY(argc > 0 &&
3909 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3910 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3911 if (!RHASH_EMPTY_P(last_hash)) {
3912 return vm_call_cfunc_other(ec, reg_cfp, calling);
3913 }
3914 argc_offset++;
3915 }
3916 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3917}
3918
3919static inline VALUE
3920vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3921{
3922 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3923 VALUE keyword_hash = reg_cfp->sp[-1];
3924
3925 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3926 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3927 }
3928
3929 return vm_call_cfunc_other(ec, reg_cfp, calling);
3930}
3931
3932static VALUE
3933vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3934{
3935 const struct rb_callinfo *ci = calling->cd->ci;
3936 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3937
3938 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3939 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3940 // f(*a)
3941 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3942 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3943 }
3944 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3945 // f(*a, **kw)
3946 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3947 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3948 }
3949 }
3950
3951 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3952 return vm_call_cfunc_other(ec, reg_cfp, calling);
3953}
3954
3955static VALUE
3956vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3957{
3958 const struct rb_callcache *cc = calling->cc;
3959 RB_DEBUG_COUNTER_INC(ccf_ivar);
3960 cfp->sp -= 1;
3961 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3962 return ivar;
3963}
3964
3965static VALUE
3966vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3967{
3968 RB_DEBUG_COUNTER_INC(ccf_attrset);
3969 VALUE val = *(cfp->sp - 1);
3970 cfp->sp -= 2;
3971 attr_index_t index = vm_cc_attr_index(cc);
3972 shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3973 ID id = vm_cc_cme(cc)->def->body.attr.id;
3974 rb_check_frozen(obj);
3975 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3976 if (UNDEF_P(res)) {
3977 switch (BUILTIN_TYPE(obj)) {
3978 case T_OBJECT:
3979 case T_CLASS:
3980 case T_MODULE:
3981 break;
3982 default:
3983 {
3984 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3985 if (!UNDEF_P(res)) {
3986 return res;
3987 }
3988 }
3989 }
3990 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3991 }
3992 return res;
3993}
3994
3995static VALUE
3996vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3997{
3998 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3999}
4000
4001static inline VALUE
4002vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4003{
4004 rb_proc_t *proc;
4005 VALUE val;
4006 const struct rb_callcache *cc = calling->cc;
4007 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4008 VALUE procv = cme->def->body.bmethod.proc;
4009
4010 if (!RB_OBJ_SHAREABLE_P(procv) &&
4011 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4012 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4013 }
4014
4015 /* control block frame */
4016 GetProcPtr(procv, proc);
4017 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4018
4019 return val;
4020}
4021
4022static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4023
4024static VALUE
4025vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4026{
4027 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4028
4029 const struct rb_callcache *cc = calling->cc;
4030 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4031 VALUE procv = cme->def->body.bmethod.proc;
4032
4033 if (!RB_OBJ_SHAREABLE_P(procv) &&
4034 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4035 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4036 }
4037
4038 rb_proc_t *proc;
4039 GetProcPtr(procv, proc);
4040 const struct rb_block *block = &proc->block;
4041
4042 while (vm_block_type(block) == block_type_proc) {
4043 block = vm_proc_block(block->as.proc);
4044 }
4045 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4046
4047 const struct rb_captured_block *captured = &block->as.captured;
4048 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4049 VALUE * const argv = cfp->sp - calling->argc;
4050 const int arg_size = ISEQ_BODY(iseq)->param.size;
4051
4052 int opt_pc;
4053 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4054 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4055 }
4056 else {
4057 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4058 }
4059
4060 cfp->sp = argv - 1; // -1 for the receiver
4061
4062 vm_push_frame(ec, iseq,
4063 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4064 calling->recv,
4065 VM_GUARDED_PREV_EP(captured->ep),
4066 (VALUE)cme,
4067 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4068 argv + arg_size,
4069 ISEQ_BODY(iseq)->local_table_size - arg_size,
4070 ISEQ_BODY(iseq)->stack_max);
4071
4072 return Qundef;
4073}
4074
4075static VALUE
4076vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4077{
4078 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4079
4080 VALUE *argv;
4081 int argc;
4082 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4083 if (UNLIKELY(calling->heap_argv)) {
4084 argv = RARRAY_PTR(calling->heap_argv);
4085 cfp->sp -= 2;
4086 }
4087 else {
4088 argc = calling->argc;
4089 argv = ALLOCA_N(VALUE, argc);
4090 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4091 cfp->sp -= argc + 1;
4092 }
4093
4094 return vm_call_bmethod_body(ec, calling, argv);
4095}
4096
4097static VALUE
4098vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4099{
4100 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4101
4102 const struct rb_callcache *cc = calling->cc;
4103 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4104 VALUE procv = cme->def->body.bmethod.proc;
4105 rb_proc_t *proc;
4106 GetProcPtr(procv, proc);
4107 const struct rb_block *block = &proc->block;
4108
4109 while (vm_block_type(block) == block_type_proc) {
4110 block = vm_proc_block(block->as.proc);
4111 }
4112 if (vm_block_type(block) == block_type_iseq) {
4113 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4114 return vm_call_iseq_bmethod(ec, cfp, calling);
4115 }
4116
4117 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4118 return vm_call_noniseq_bmethod(ec, cfp, calling);
4119}
4120
4121VALUE
4122rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4123{
4124 VALUE klass = current_class;
4125
4126 /* for a prepended Module, start the search from the covering class */
4127 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4128 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4129 klass = RBASIC_CLASS(klass);
4130 }
4131
4132 while (RTEST(klass)) {
4133 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4134 if (owner == target_owner) {
4135 return klass;
4136 }
4137 klass = RCLASS_SUPER(klass);
4138 }
4139
4140 return current_class; /* maybe module function */
4141}
4142
4143static const rb_callable_method_entry_t *
4144aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4145{
4146 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4147 const rb_callable_method_entry_t *cme;
4148
4149 if (orig_me->defined_class == 0) {
4150 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4151 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4152 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4153
4154 if (me->def->reference_count == 1) {
4155 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4156 }
4157 else {
4158 rb_method_definition_t *def =
4159 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4160 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4161 }
4162 }
4163 else {
4164 cme = (const rb_callable_method_entry_t *)orig_me;
4165 }
4166
4167 VM_ASSERT(callable_method_entry_p(cme));
4168 return cme;
4169}
4170
4171 const rb_callable_method_entry_t *
4172 rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4173{
4174 return aliased_callable_method_entry(me);
4175}
4176
4177static VALUE
4178vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4179{
4180 calling->cc = &VM_CC_ON_STACK(Qundef,
4181 vm_call_general,
4182 {{0}},
4183 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4184
4185 return vm_call_method_each_type(ec, cfp, calling);
4186}
4187
4188static enum method_missing_reason
4189ci_missing_reason(const struct rb_callinfo *ci)
4190{
4191 enum method_missing_reason stat = MISSING_NOENTRY;
4192 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4193 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4194 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4195 return stat;
4196}
4197
4198static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4199
4200static VALUE
4201vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4202 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4203{
4204 ASSUME(calling->argc >= 0);
4205
4206 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4207 int argc = calling->argc;
4208 VALUE recv = calling->recv;
4209 VALUE klass = CLASS_OF(recv);
4210 ID mid = rb_check_id(&symbol);
4211 flags |= VM_CALL_OPT_SEND;
4212
4213 if (UNLIKELY(! mid)) {
4214 mid = idMethodMissing;
4215 missing_reason = ci_missing_reason(ci);
4216 ec->method_missing_reason = missing_reason;
4217
4218 VALUE argv_ary;
4219 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4220 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4221 rb_ary_unshift(argv_ary, symbol);
4222
4223 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4224 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4225 VALUE exc = rb_make_no_method_exception(
4226 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4227
4228 rb_exc_raise(exc);
4229 }
4230 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4231 }
4232 else {
4233 /* E.g. when argc == 2
4234 *
4235 * | | | | TOPN
4236 * | | +------+
4237 * | | +---> | arg1 | 0
4238 * +------+ | +------+
4239 * | arg1 | -+ +-> | arg0 | 1
4240 * +------+ | +------+
4241 * | arg0 | ---+ | sym | 2
4242 * +------+ +------+
4243 * | recv | | recv | 3
4244 * --+------+--------+------+------
4245 */
4246 int i = argc;
4247 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4248 INC_SP(1);
4249 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4250 argc = ++calling->argc;
4251
4252 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4253 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4254 TOPN(i) = symbol;
4255 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4256 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4257 VALUE exc = rb_make_no_method_exception(
4258 rb_eNoMethodError, 0, recv, argc, argv, priv);
4259
4260 rb_exc_raise(exc);
4261 }
4262 else {
4263 TOPN(i) = rb_str_intern(symbol);
4264 }
4265 }
4266 }
4267
4268 struct rb_forwarding_call_data new_fcd = {
4269 .cd = {
4270 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4271 .cc = NULL,
4272 },
4273 .caller_ci = NULL,
4274 };
4275
4276 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4277 calling->cd = &new_fcd.cd;
4278 }
4279 else {
4280 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4281 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4282 new_fcd.caller_ci = caller_ci;
4283 calling->cd = (struct rb_call_data *)&new_fcd;
4284 }
4285 calling->cc = &VM_CC_ON_STACK(klass,
4286 vm_call_general,
4287 { .method_missing_reason = missing_reason },
4288 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4289
4290 if (flags & VM_CALL_FCALL) {
4291 return vm_call_method(ec, reg_cfp, calling);
4292 }
4293
4294 const struct rb_callcache *cc = calling->cc;
4295 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4296
4297 if (vm_cc_cme(cc) != NULL) {
4298 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4299 case METHOD_VISI_PUBLIC: /* likely */
4300 return vm_call_method_each_type(ec, reg_cfp, calling);
4301 case METHOD_VISI_PRIVATE:
4302 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4303 break;
4304 case METHOD_VISI_PROTECTED:
4305 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4306 break;
4307 default:
4308 VM_UNREACHABLE(vm_call_method);
4309 }
4310 return vm_call_method_missing(ec, reg_cfp, calling);
4311 }
4312
4313 return vm_call_method_nome(ec, reg_cfp, calling);
4314}
4315
4316static VALUE
4317vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4318{
4319 const struct rb_callinfo *ci = calling->cd->ci;
4320 int i;
4321 VALUE sym;
4322
4323 i = calling->argc - 1;
4324
4325 if (calling->argc == 0) {
4326 rb_raise(rb_eArgError, "no method name given");
4327 }
4328
4329 sym = TOPN(i);
4330 /* E.g. when i == 2
4331 *
4332 * | | | | TOPN
4333 * +------+ | |
4334 * | arg1 | ---+ | | 0
4335 * +------+ | +------+
4336 * | arg0 | -+ +-> | arg1 | 1
4337 * +------+ | +------+
4338 * | sym | +---> | arg0 | 2
4339 * +------+ +------+
4340 * | recv | | recv | 3
4341 * --+------+--------+------+------
4342 */
4343 /* shift arguments */
4344 if (i > 0) {
4345 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4346 }
4347 calling->argc -= 1;
4348 DEC_SP(1);
4349
4350 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4351}
4352
4353static VALUE
4354vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4355{
4356 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4357 const struct rb_callinfo *ci = calling->cd->ci;
4358 int flags = VM_CALL_FCALL;
4359 VALUE sym;
4360
4361 VALUE argv_ary;
4362 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4363 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4364 sym = rb_ary_shift(argv_ary);
4365 flags |= VM_CALL_ARGS_SPLAT;
4366 if (calling->kw_splat) {
4367 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4368 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4369 calling->kw_splat = 0;
4370 }
4371 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4372 }
4373
4374 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4375 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4376}
4377
4378static VALUE
4379vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4380{
4381 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4382 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4383}
4384
4385static VALUE
4386vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4387{
4388 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4389
4390 const struct rb_callinfo *ci = calling->cd->ci;
4391 int flags = vm_ci_flag(ci);
4392
4393 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4394 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4395 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4396 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4397 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4398 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4399 }
4400
4401 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4402 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4403}
4404
4405static VALUE
4406vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4407 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4408{
4409 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4410
4411 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4412 unsigned int argc, flag;
4413
4414 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4415 argc = ++calling->argc;
4416
4417 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4418 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4419 vm_check_canary(ec, reg_cfp->sp);
4420 if (argc > 1) {
4421 MEMMOVE(argv+1, argv, VALUE, argc-1);
4422 }
4423 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4424 INC_SP(1);
4425
4426 ec->method_missing_reason = reason;
4427
4428 struct rb_forwarding_call_data new_fcd = {
4429 .cd = {
4430 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4431 .cc = NULL,
4432 },
4433 .caller_ci = NULL,
4434 };
4435
4436 if (!(flag & VM_CALL_FORWARDING)) {
4437 calling->cd = &new_fcd.cd;
4438 }
4439 else {
4440 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4441 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4442 new_fcd.caller_ci = caller_ci;
4443 calling->cd = (struct rb_call_data *)&new_fcd;
4444 }
4445
4446 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4447 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4448 return vm_call_method(ec, reg_cfp, calling);
4449}
4450
4451static VALUE
4452vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4453{
4454 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4455}
4456
4457static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4458static VALUE
4459vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4460{
4461 klass = RCLASS_SUPER(klass);
4462
4463 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4464 if (cme == NULL) {
4465 return vm_call_method_nome(ec, cfp, calling);
4466 }
4467 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4468 cme->def->body.refined.orig_me) {
4469 cme = refined_method_callable_without_refinement(cme);
4470 }
4471
4472 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4473
4474 return vm_call_method_each_type(ec, cfp, calling);
4475}
4476
4477static inline VALUE
4478find_refinement(VALUE refinements, VALUE klass)
4479{
4480 if (NIL_P(refinements)) {
4481 return Qnil;
4482 }
4483 return rb_hash_lookup(refinements, klass);
4484}
4485
4486PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4487static rb_control_frame_t *
4488current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4489{
4490 rb_control_frame_t *top_cfp = cfp;
4491
4492 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4493 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4494
4495 do {
4496 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4497 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4498 /* TODO: orphan block */
4499 return top_cfp;
4500 }
4501 } while (cfp->iseq != local_iseq);
4502 }
4503 return cfp;
4504}
4505
4506static const rb_callable_method_entry_t *
4507refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4508{
4509 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4510 const rb_callable_method_entry_t *cme;
4511
4512 if (orig_me->defined_class == 0) {
4513 cme = NULL;
4514 rb_notimplement();
4515 }
4516 else {
4517 cme = (const rb_callable_method_entry_t *)orig_me;
4518 }
4519
4520 VM_ASSERT(callable_method_entry_p(cme));
4521
4522 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4523 cme = NULL;
4524 }
4525
4526 return cme;
4527}
4528
4529static const rb_callable_method_entry_t *
4530search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4531{
4532 ID mid = vm_ci_mid(calling->cd->ci);
4533 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4534 const struct rb_callcache * const cc = calling->cc;
4535 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4536
4537 for (; cref; cref = CREF_NEXT(cref)) {
4538 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4539 if (NIL_P(refinement)) continue;
4540
4541 const rb_callable_method_entry_t *const ref_me =
4542 rb_callable_method_entry(refinement, mid);
4543
4544 if (ref_me) {
4545 if (vm_cc_call(cc) == vm_call_super_method) {
4546 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4547 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4548 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4549 continue;
4550 }
4551 }
4552
4553 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4554 cme->def != ref_me->def) {
4555 cme = ref_me;
4556 }
4557 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4558 return cme;
4559 }
4560 }
4561 else {
4562 return NULL;
4563 }
4564 }
4565
4566 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4567 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4568 }
4569 else {
4570 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4571 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4572 return cme;
4573 }
4574}
4575
4576static VALUE
4577vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4578{
4579 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4580
4581 if (ref_cme) {
4582 if (calling->cd->cc) {
4583 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4584 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4585 return vm_call_method(ec, cfp, calling);
4586 }
4587 else {
4588 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4589 calling->cc = ref_cc;
4590 return vm_call_method(ec, cfp, calling);
4591 }
4592 }
4593 else {
4594 return vm_call_method_nome(ec, cfp, calling);
4595 }
4596}
4597
4598static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4599
4600NOINLINE(static VALUE
4601 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4602 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4603
4604static VALUE
4605vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4606 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4607{
4608 int argc = calling->argc;
4609
4610 /* remove self */
4611 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4612 DEC_SP(1);
4613
4614 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4615}
4616
4617static VALUE
4618vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4619{
4620 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4621
4622 const struct rb_callinfo *ci = calling->cd->ci;
4623 VALUE procval = calling->recv;
4624 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4625}
4626
4627static VALUE
4628vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4629{
4630 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4631
4632 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4633 const struct rb_callinfo *ci = calling->cd->ci;
4634
4635 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4636 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4637 }
4638 else {
4639 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4640 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4641 return vm_call_general(ec, reg_cfp, calling);
4642 }
4643}
4644
4645static VALUE
4646vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4647{
4648 VALUE recv = calling->recv;
4649
4650 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4651 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4652 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4653
4654 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4655 return internal_RSTRUCT_GET(recv, off);
4656}
4657
4658static VALUE
4659vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4660{
4661 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4662
4663 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4664 reg_cfp->sp -= 1;
4665 return ret;
4666}
4667
4668static VALUE
4669vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4670{
4671 VALUE recv = calling->recv;
4672
4673 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4674 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4675 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4676
4677 rb_check_frozen(recv);
4678
4679 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4680 internal_RSTRUCT_SET(recv, off, val);
4681
4682 return val;
4683}
4684
4685static VALUE
4686vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4687{
4688 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4689
4690 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4691 reg_cfp->sp -= 2;
4692 return ret;
4693}
4694
4695NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4696 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4697
4698#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4699 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4700 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4701 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4702 var = func; \
4703 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4704 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4705 } \
4706 else { \
4707 nohook; \
4708 var = func; \
4709 }
4710
4711static VALUE
4712vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4713 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4714{
4715 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4716 case OPTIMIZED_METHOD_TYPE_SEND:
4717 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4718 return vm_call_opt_send(ec, cfp, calling);
4719 case OPTIMIZED_METHOD_TYPE_CALL:
4720 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4721 return vm_call_opt_call(ec, cfp, calling);
4722 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4723 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4724 return vm_call_opt_block_call(ec, cfp, calling);
4725 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4726 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4727 rb_check_arity(calling->argc, 0, 0);
4728
4729 VALUE v;
4730 VM_CALL_METHOD_ATTR(v,
4731 vm_call_opt_struct_aref(ec, cfp, calling),
4732 set_vm_cc_ivar(cc); \
4733 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4734 return v;
4735 }
4736 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4737 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4738 rb_check_arity(calling->argc, 1, 1);
4739
4740 VALUE v;
4741 VM_CALL_METHOD_ATTR(v,
4742 vm_call_opt_struct_aset(ec, cfp, calling),
4743 set_vm_cc_ivar(cc); \
4744 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4745 return v;
4746 }
4747 default:
4748 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4749 }
4750}
4751
4752static VALUE
4753vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4754{
4755 const struct rb_callinfo *ci = calling->cd->ci;
4756 const struct rb_callcache *cc = calling->cc;
4757 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4758 VALUE v;
4759
4760 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4761
4762 switch (cme->def->type) {
4763 case VM_METHOD_TYPE_ISEQ:
4764 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4765 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4766 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4767 }
4768 else {
4769 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4770 return vm_call_iseq_setup(ec, cfp, calling);
4771 }
4772
4773 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4774 case VM_METHOD_TYPE_CFUNC:
4775 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4776 return vm_call_cfunc(ec, cfp, calling);
4777
4778 case VM_METHOD_TYPE_ATTRSET:
4779 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4780
4781 rb_check_arity(calling->argc, 1, 1);
4782
4783 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4784
4785 if (vm_cc_markable(cc)) {
4786 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4787 VM_CALL_METHOD_ATTR(v,
4788 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4789 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4790 }
4791 else {
4792 cc = &((struct rb_callcache) {
4793 .flags = T_IMEMO |
4794 (imemo_callcache << FL_USHIFT) |
4795 VM_CALLCACHE_UNMARKABLE |
4796 VM_CALLCACHE_ON_STACK,
4797 .klass = cc->klass,
4798 .cme_ = cc->cme_,
4799 .call_ = cc->call_,
4800 .aux_ = {
4801 .attr = {
4802 .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4803 }
4804 },
4805 });
4806
4807 VM_CALL_METHOD_ATTR(v,
4808 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4809 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4810 }
4811 return v;
4812
4813 case VM_METHOD_TYPE_IVAR:
4814 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4815 rb_check_arity(calling->argc, 0, 0);
4816 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4817 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4818 VM_CALL_METHOD_ATTR(v,
4819 vm_call_ivar(ec, cfp, calling),
4820 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4821 return v;
4822
4823 case VM_METHOD_TYPE_MISSING:
4824 vm_cc_method_missing_reason_set(cc, 0);
4825 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4826 return vm_call_method_missing(ec, cfp, calling);
4827
4828 case VM_METHOD_TYPE_BMETHOD:
4829 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4830 return vm_call_bmethod(ec, cfp, calling);
4831
4832 case VM_METHOD_TYPE_ALIAS:
4833 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4834 return vm_call_alias(ec, cfp, calling);
4835
4836 case VM_METHOD_TYPE_OPTIMIZED:
4837 return vm_call_optimized(ec, cfp, calling, ci, cc);
4838
4839 case VM_METHOD_TYPE_UNDEF:
4840 break;
4841
4842 case VM_METHOD_TYPE_ZSUPER:
4843 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4844
4845 case VM_METHOD_TYPE_REFINED:
4846 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4847 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4848 return vm_call_refined(ec, cfp, calling);
4849 }
4850
4851 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4852}
4853
4854NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4855
4856static VALUE
4857vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4858{
4859 /* method missing */
4860 const struct rb_callinfo *ci = calling->cd->ci;
4861 const int stat = ci_missing_reason(ci);
4862
4863 if (vm_ci_mid(ci) == idMethodMissing) {
4864 if (UNLIKELY(calling->heap_argv)) {
4865 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4866 }
4867 else {
4868 rb_control_frame_t *reg_cfp = cfp;
4869 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4870 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4871 }
4872 }
4873 else {
4874 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4875 }
4876}
4877
4878/* Protected method calls and super invocations need to check that the receiver
4879 * (self for super) inherits the module on which the method is defined.
4880 * In the case of refinements, the check should consider the original class,
4881 * not the refinement.
4882 */
4883static VALUE
4884vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4885{
4886 VALUE defined_class = me->defined_class;
4887 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4888 return NIL_P(refined_class) ? defined_class : refined_class;
4889}
4890
4891static inline VALUE
4892vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4893{
4894 const struct rb_callinfo *ci = calling->cd->ci;
4895 const struct rb_callcache *cc = calling->cc;
4896
4897 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4898
4899 if (vm_cc_cme(cc) != NULL) {
4900 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4901 case METHOD_VISI_PUBLIC: /* likely */
4902 return vm_call_method_each_type(ec, cfp, calling);
4903
4904 case METHOD_VISI_PRIVATE:
4905 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4906 enum method_missing_reason stat = MISSING_PRIVATE;
4907 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4908
4909 vm_cc_method_missing_reason_set(cc, stat);
4910 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4911 return vm_call_method_missing(ec, cfp, calling);
4912 }
4913 return vm_call_method_each_type(ec, cfp, calling);
4914
4915 case METHOD_VISI_PROTECTED:
4916 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4917 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4918 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4919 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4920 return vm_call_method_missing(ec, cfp, calling);
4921 }
4922 else {
4923 /* caching method info to dummy cc */
4924 VM_ASSERT(vm_cc_cme(cc) != NULL);
4925 struct rb_callcache cc_on_stack = *cc;
4926 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4927 calling->cc = &cc_on_stack;
4928 return vm_call_method_each_type(ec, cfp, calling);
4929 }
4930 }
4931 return vm_call_method_each_type(ec, cfp, calling);
4932
4933 default:
4934 rb_bug("unreachable");
4935 }
4936 }
4937 else {
4938 return vm_call_method_nome(ec, cfp, calling);
4939 }
4940}
4941
4942static VALUE
4943vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4944{
4945 RB_DEBUG_COUNTER_INC(ccf_general);
4946 return vm_call_method(ec, reg_cfp, calling);
4947}
4948
4949void
4950rb_vm_cc_general(const struct rb_callcache *cc)
4951{
4952 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4953 VM_ASSERT(cc != vm_cc_empty());
4954
4955 *(vm_call_handler *)&cc->call_ = vm_call_general;
4956}
4957
4958static VALUE
4959vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4960{
4961 RB_DEBUG_COUNTER_INC(ccf_super_method);
4962
4963 // This line exists to make this function different from `vm_call_general`, because some compilers (MSVC, in our
4964 // experience) can merge identical functions, so the two would end up sharing one address.
4965 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay distinct.
4966 if (ec == NULL) rb_bug("unreachable");
4967
4968 /* this check is required to distinguish this function from the others. */
4969 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4970 return vm_call_method(ec, reg_cfp, calling);
4971}
4972
4973/* super */
4974
4975static inline VALUE
4976vm_search_normal_superclass(VALUE klass)
4977{
4978 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4979 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4980 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4981 klass = RBASIC(klass)->klass;
4982 }
4983 klass = RCLASS_ORIGIN(klass);
4984 return RCLASS_SUPER(klass);
4985}
4986
4987NORETURN(static void vm_super_outside(void));
4988
4989static void
4990vm_super_outside(void)
4991{
4992 rb_raise(rb_eNoMethodError, "super called outside of method");
4993}
4994
4995static const struct rb_callcache *
4996empty_cc_for_super(void)
4997{
4998 return &vm_empty_cc_for_super;
4999}
5000
5001static const struct rb_callcache *
5002vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5003{
5004 VALUE current_defined_class;
5005 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5006
5007 if (!me) {
5008 vm_super_outside();
5009 }
5010
5011 current_defined_class = vm_defined_class_for_protected_call(me);
5012
5013 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5014 reg_cfp->iseq != method_entry_iseqptr(me) &&
5015 !rb_obj_is_kind_of(recv, current_defined_class)) {
5016 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5017 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5018
5019 if (m) { /* not bound UnboundMethod */
5020 rb_raise(rb_eTypeError,
5021 "self has wrong type to call super in this context: "
5022 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5023 rb_obj_class(recv), m);
5024 }
5025 }
5026
5027 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5028 rb_raise(rb_eRuntimeError,
5029 "implicit argument passing of super from method defined"
5030 " by define_method() is not supported."
5031 " Specify all arguments explicitly.");
5032 }
5033
5034 ID mid = me->def->original_id;
5035
5036 if (!vm_ci_markable(cd->ci)) {
5037 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5038 }
5039 else {
5040 // update iseq. really? (TODO)
5041 cd->ci = vm_ci_new_runtime(mid,
5042 vm_ci_flag(cd->ci),
5043 vm_ci_argc(cd->ci),
5044 vm_ci_kwarg(cd->ci));
5045
5046 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5047 }
5048
5049 const struct rb_callcache *cc;
5050
5051 VALUE klass = vm_search_normal_superclass(me->defined_class);
5052
5053 if (!klass) {
5054 /* bound instance method of module */
5055 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5056 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5057 }
5058 else {
5059 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5060 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5061
5062 // define_method can cache for different method id
5063 if (cached_cme == NULL) {
5064 // empty_cc_for_super is not markable object
5065 cd->cc = empty_cc_for_super();
5066 }
5067 else if (cached_cme->called_id != mid) {
5068 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5069 if (cme) {
5070 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5071 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5072 }
5073 else {
5074 cd->cc = cc = empty_cc_for_super();
5075 }
5076 }
5077 else {
5078 switch (cached_cme->def->type) {
5079 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5080 case VM_METHOD_TYPE_REFINED:
5081 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5082 case VM_METHOD_TYPE_ATTRSET:
5083 case VM_METHOD_TYPE_IVAR:
5084 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5085 break;
5086 default:
5087 break; // use fastpath
5088 }
5089 }
5090 }
5091
5092 VM_ASSERT((vm_cc_cme(cc), true));
5093
5094 return cc;
5095}
5096
5097/* yield */
5098
5099static inline int
5100block_proc_is_lambda(const VALUE procval)
5101{
5102 rb_proc_t *proc;
5103
5104 if (procval) {
5105 GetProcPtr(procval, proc);
5106 return proc->is_lambda;
5107 }
5108 else {
5109 return 0;
5110 }
5111}
5112
5113static inline const rb_namespace_t *
5114block_proc_namespace(const VALUE procval)
5115{
5116 rb_proc_t *proc;
5117
5118 if (procval) {
5119 GetProcPtr(procval, proc);
5120 return proc->ns;
5121 }
5122 else {
5123 return NULL;
5124 }
5125}
5126
5127static VALUE
5128vm_yield_with_cfunc(rb_execution_context_t *ec,
5129 const struct rb_captured_block *captured,
5130 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5132{
5133 int is_lambda = FALSE; /* TODO */
5134 VALUE val, arg, blockarg;
5135 int frame_flag;
5136 const struct vm_ifunc *ifunc = captured->code.ifunc;
5137
5138 if (is_lambda) {
5139 arg = rb_ary_new4(argc, argv);
5140 }
5141 else if (argc == 0) {
5142 arg = Qnil;
5143 }
5144 else {
5145 arg = argv[0];
5146 }
5147
5148 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5149
5150 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5151 if (kw_splat) {
5152 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5153 }
5154
5155 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5156 frame_flag,
5157 self,
5158 VM_GUARDED_PREV_EP(captured->ep),
5159 (VALUE)me,
5160 0, ec->cfp->sp, 0, 0);
5161 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5162 rb_vm_pop_frame(ec);
5163
5164 return val;
5165}
5166
5167VALUE
5168rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5169{
5170 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5171}
5172
5173static VALUE
5174vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5175{
5176 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5177}
5178
5179static inline int
5180vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5181{
5182 int i;
5183 long len = RARRAY_LEN(ary);
5184
5185 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5186
5187 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5188 argv[i] = RARRAY_AREF(ary, i);
5189 }
5190
5191 return i;
5192}
5193
5194static inline VALUE
5195vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5196{
5197 VALUE ary, arg0 = argv[0];
5198 ary = rb_check_array_type(arg0);
5199#if 0
5200 argv[0] = arg0;
5201#else
5202 VM_ASSERT(argv[0] == arg0);
5203#endif
5204 return ary;
5205}
5206
5207static int
5208vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5209{
5210 if (rb_simple_iseq_p(iseq)) {
5211 rb_control_frame_t *cfp = ec->cfp;
5212 VALUE arg0;
5213
5214 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5215
5216 if (arg_setup_type == arg_setup_block &&
5217 calling->argc == 1 &&
5218 ISEQ_BODY(iseq)->param.flags.has_lead &&
5219 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5220 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5221 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5222 }
5223
5224 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5225 if (arg_setup_type == arg_setup_block) {
5226 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5227 int i;
5228 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5229 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5230 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5231 }
5232 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5233 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5234 }
5235 }
5236 else {
5237 argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5238 }
5239 }
5240
5241 return 0;
5242 }
5243 else {
5244 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5245 }
5246}
5247
5248static int
5249vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5250{
5251 struct rb_calling_info calling_entry, *calling;
5252
5253 calling = &calling_entry;
5254 calling->argc = argc;
5255 calling->block_handler = block_handler;
5256 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5257 calling->recv = Qundef;
5258 calling->heap_argv = 0;
5259 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5260
5261 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5262}
5263
5264/* ruby iseq -> ruby block */
5265
5266static VALUE
5267vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5268 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5269 bool is_lambda, VALUE block_handler)
5270{
5271 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5272 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5273 const int arg_size = ISEQ_BODY(iseq)->param.size;
5274 VALUE * const rsp = GET_SP() - calling->argc;
5275 VALUE * const argv = rsp;
5276 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5277 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5278
5279 SET_SP(rsp);
5280
5281 if (calling->proc_ns) {
5282 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5283 }
5284
5285 vm_push_frame(ec, iseq,
5286 frame_flag,
5287 captured->self,
5288 VM_GUARDED_PREV_EP(captured->ep), 0,
5289 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5290 rsp + arg_size,
5291 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5292
5293 return Qundef;
5294}
5295
5296static VALUE
5297vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5298 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5299 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5300{
5301 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5302 int flags = vm_ci_flag(ci);
5303
5304 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5305 ((calling->argc == 0) ||
5306 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5307 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5308 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5309 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5310 flags = 0;
5311 if (UNLIKELY(calling->heap_argv)) {
5312#if VM_ARGC_STACK_MAX < 0
5313 if (RARRAY_LEN(calling->heap_argv) < 1) {
5314 rb_raise(rb_eArgError, "no receiver given");
5315 }
5316#endif
5317 calling->recv = rb_ary_shift(calling->heap_argv);
5318 // Modify stack to avoid cfp consistency error
5319 reg_cfp->sp++;
5320 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5321 reg_cfp->sp[-2] = calling->recv;
5322 flags |= VM_CALL_ARGS_SPLAT;
5323 }
5324 else {
5325 if (calling->argc < 1) {
5326 rb_raise(rb_eArgError, "no receiver given");
5327 }
5328 calling->recv = TOPN(--calling->argc);
5329 }
5330 if (calling->kw_splat) {
5331 flags |= VM_CALL_KW_SPLAT;
5332 }
5333 }
5334 else {
5335 if (calling->argc < 1) {
5336 rb_raise(rb_eArgError, "no receiver given");
5337 }
5338 calling->recv = TOPN(--calling->argc);
5339 }
5340
5341 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5342}
5343
5344static VALUE
5345vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5346 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5347 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5348{
5349 VALUE val;
5350 int argc;
5351 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5352 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5353 argc = calling->argc;
5354 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5355 POPN(argc); /* TODO: should put before C/yield? */
5356 return val;
5357}
5358
5359static VALUE
5360vm_proc_to_block_handler(VALUE procval)
5361{
5362 const struct rb_block *block = vm_proc_block(procval);
5363
5364 switch (vm_block_type(block)) {
5365 case block_type_iseq:
5366 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5367 case block_type_ifunc:
5368 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5369 case block_type_symbol:
5370 return VM_BH_FROM_SYMBOL(block->as.symbol);
5371 case block_type_proc:
5372 return VM_BH_FROM_PROC(block->as.proc);
5373 }
5374 VM_UNREACHABLE(vm_yield_with_proc);
5375 return Qundef;
5376}
5377
5378static VALUE
5379vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5380 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5381 bool is_lambda, VALUE block_handler)
5382{
5383 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5384 VALUE proc = VM_BH_TO_PROC(block_handler);
5385 if (!calling->proc_ns) {
5386 calling->proc_ns = block_proc_namespace(proc);
5387 }
5388 is_lambda = block_proc_is_lambda(proc);
5389 block_handler = vm_proc_to_block_handler(proc);
5390 }
5391
5392 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5393}
5394
5395static inline VALUE
5396vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5397 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5398 bool is_lambda, VALUE block_handler)
5399{
5400 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5401 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5402 bool is_lambda, VALUE block_handler);
5403
5404 switch (vm_block_handler_type(block_handler)) {
5405 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5406 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5407 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5408 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5409 default: rb_bug("vm_invoke_block: unreachable");
5410 }
5411
5412 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5413}
5414
5415static VALUE
5416vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5417{
5418 const rb_execution_context_t *ec = GET_EC();
5419 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5420 struct rb_captured_block *captured;
5421
5422 if (cfp == 0) {
5423 rb_bug("vm_make_proc_with_iseq: unreachable");
5424 }
5425
5426 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5427 captured->code.iseq = blockiseq;
5428
5429 return rb_vm_make_proc(ec, captured, rb_cProc);
5430}
5431
5432static VALUE
5433vm_once_exec(VALUE iseq)
5434{
5435 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5436 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5437}
5438
5439static VALUE
5440vm_once_clear(VALUE data)
5441{
5442 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5443 is->once.running_thread = NULL;
5444 return Qnil;
5445}
5446
5447/* defined insn */
5448
5449static bool
5450check_respond_to_missing(VALUE obj, VALUE v)
5451{
5452 VALUE args[2];
5453 VALUE r;
5454
5455 args[0] = obj; args[1] = Qfalse;
5456 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5457 if (!UNDEF_P(r) && RTEST(r)) {
5458 return true;
5459 }
5460 else {
5461 return false;
5462 }
5463}
5464
5465static bool
5466vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5467{
5468 VALUE klass;
5469 enum defined_type type = (enum defined_type)op_type;
5470
5471 switch (type) {
5472 case DEFINED_IVAR:
5473 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5474 break;
5475 case DEFINED_GVAR:
5476 return rb_gvar_defined(SYM2ID(obj));
5477 break;
5478 case DEFINED_CVAR: {
5479 const rb_cref_t *cref = vm_get_cref(GET_EP());
5480 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5481 return rb_cvar_defined(klass, SYM2ID(obj));
5482 break;
5483 }
5484 case DEFINED_CONST:
5485 case DEFINED_CONST_FROM: {
5486 bool allow_nil = type == DEFINED_CONST;
5487 klass = v;
5488 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5489 break;
5490 }
5491 case DEFINED_FUNC:
5492 klass = CLASS_OF(v);
5493 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5494 break;
5495 case DEFINED_METHOD:{
5496 VALUE klass = CLASS_OF(v);
5497 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5498
5499 if (me) {
5500 switch (METHOD_ENTRY_VISI(me)) {
5501 case METHOD_VISI_PRIVATE:
5502 break;
5503 case METHOD_VISI_PROTECTED:
5504 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5505 break;
5506 }
5507 case METHOD_VISI_PUBLIC:
5508 return true;
5509 break;
5510 default:
5511 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5512 }
5513 }
5514 else {
5515 return check_respond_to_missing(obj, v);
5516 }
5517 break;
5518 }
5519 case DEFINED_YIELD:
5520 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5521 return true;
5522 }
5523 break;
5524 case DEFINED_ZSUPER:
5525 {
5526 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5527
5528 if (me) {
5529 VALUE klass = vm_search_normal_superclass(me->defined_class);
5530 if (!klass) return false;
5531
5532 ID id = me->def->original_id;
5533
5534 return rb_method_boundp(klass, id, 0);
5535 }
5536 }
5537 break;
5538 case DEFINED_REF:
5539 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5540 default:
5541 rb_bug("unimplemented defined? type (VM)");
5542 break;
5543 }
5544
5545 return false;
5546}
5547
5548bool
5549rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5550{
5551 return vm_defined(ec, reg_cfp, op_type, obj, v);
5552}
5553
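/* Walks `lv` levels up the chain of environment pointers, yielding the EP of
 * the lexically enclosing scope at that depth (used to access locals of
 * outer scopes). */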
5554static const VALUE *
5555vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5556{
5557 rb_num_t i;
5558 const VALUE *ep = reg_ep;
5559 for (i = 0; i < lv; i++) {
5560 ep = GET_PREV_EP(ep);
5561 }
5562 return ep;
5563}
5564
5565static VALUE
5566vm_get_special_object(const VALUE *const reg_ep,
5567 enum vm_special_object_type type)
5568{
5569 switch (type) {
5570 case VM_SPECIAL_OBJECT_VMCORE:
5571 return rb_mRubyVMFrozenCore;
5572 case VM_SPECIAL_OBJECT_CBASE:
5573 return vm_get_cbase(reg_ep);
5574 case VM_SPECIAL_OBJECT_CONST_BASE:
5575 return vm_get_const_base(reg_ep);
5576 default:
5577 rb_bug("putspecialobject insn: unknown value_type %d", type);
5578 }
5579}
5580
5581static VALUE
5582vm_concat_array(VALUE ary1, VALUE ary2st)
5583{
5584 const VALUE ary2 = ary2st;
5585 VALUE tmp1 = rb_check_to_array(ary1);
5586 VALUE tmp2 = rb_check_to_array(ary2);
5587
5588 if (NIL_P(tmp1)) {
5589 tmp1 = rb_ary_new3(1, ary1);
5590 }
5591 if (tmp1 == ary1) {
5592 tmp1 = rb_ary_dup(ary1);
5593 }
5594
5595 if (NIL_P(tmp2)) {
5596 return rb_ary_push(tmp1, ary2);
5597 }
5598 else {
5599 return rb_ary_concat(tmp1, tmp2);
5600 }
5601}
5602
5603static VALUE
5604vm_concat_to_array(VALUE ary1, VALUE ary2st)
5605{
5606 /* ary1 must be a newly created array */
5607 const VALUE ary2 = ary2st;
5608
5609 if (NIL_P(ary2)) return ary1;
5610
5611 VALUE tmp2 = rb_check_to_array(ary2);
5612
5613 if (NIL_P(tmp2)) {
5614 return rb_ary_push(ary1, ary2);
5615 }
5616 else {
5617 return rb_ary_concat(ary1, tmp2);
5618 }
5619}
5620
5621// The YJIT implementation uses this C function and therefore
5622// needs a non-static entry point.
5623VALUE
5624rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5625{
5626 return vm_concat_array(ary1, ary2st);
5627}
5628
5629VALUE
5630rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5631{
5632 return vm_concat_to_array(ary1, ary2st);
5633}
5634
5635static VALUE
5636vm_splat_array(VALUE flag, VALUE ary)
5637{
5638 if (NIL_P(ary)) {
5639 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5640 }
5641 VALUE tmp = rb_check_to_array(ary);
5642 if (NIL_P(tmp)) {
5643 return rb_ary_new3(1, ary);
5644 }
5645 else if (RTEST(flag)) {
5646 return rb_ary_dup(tmp);
5647 }
5648 else {
5649 return tmp;
5650 }
5651}
5652
5653// The YJIT implementation uses this C function and therefore
5654// needs a non-static entry point.
5655VALUE
5656rb_vm_splat_array(VALUE flag, VALUE ary)
5657{
5658 return vm_splat_array(flag, ary);
5659}
5660
5661static VALUE
5662vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5663{
5664 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5665
5666 if (flag & VM_CHECKMATCH_ARRAY) {
5667 long i;
5668 const long n = RARRAY_LEN(pattern);
5669
5670 for (i = 0; i < n; i++) {
5671 VALUE v = RARRAY_AREF(pattern, i);
5672 VALUE c = check_match(ec, v, target, type);
5673
5674 if (RTEST(c)) {
5675 return c;
5676 }
5677 }
5678 return Qfalse;
5679 }
5680 else {
5681 return check_match(ec, pattern, target, type);
5682 }
5683}
5684
5685VALUE
5686rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5687{
5688 return vm_check_match(ec, target, pattern, flag);
5689}
5690
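/* Used by the checkkeyword instruction: kw_bits is the "unspecified bits"
 * local written during argument setup, a Fixnum bitmask for up to
 * KW_SPECIFIED_BITS_MAX keywords (and a Hash of indexes beyond that).
 * Returns Qtrue when the keyword at `idx` WAS supplied by the caller, so the
 * compiled code can skip evaluating its default expression. */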
5691static VALUE
5692vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5693{
5694 const VALUE kw_bits = *(ep - bits);
5695
5696 if (FIXNUM_P(kw_bits)) {
5697 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5698 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5699 return Qfalse;
5700 }
5701 else {
5702 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5703 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5704 }
5705 return Qtrue;
5706}
5707
5708static void
5709vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5710{
5711 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5712 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5713 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5714 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5715
5716 switch (flag) {
5717 case RUBY_EVENT_CALL:
5718 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5719 return;
5720 case RUBY_EVENT_C_CALL:
5721 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5722 return;
5723 case RUBY_EVENT_RETURN:
5724 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5725 return;
5726      case RUBY_EVENT_C_RETURN:
5727 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5728 return;
5729 }
5730 }
5731}
5732
5733static VALUE
5734vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5735{
5736 if (!rb_const_defined_at(cbase, id)) {
5737 return 0;
5738 }
5739 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5740 return rb_public_const_get_at(cbase, id);
5741 }
5742 else {
5743 return rb_const_get_at(cbase, id);
5744 }
5745}
5746
5747static VALUE
5748vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5749{
5750 if (!RB_TYPE_P(klass, T_CLASS)) {
5751 return 0;
5752 }
5753 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5754 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5755
5756 if (tmp != super) {
5757 rb_raise(rb_eTypeError,
5758 "superclass mismatch for class %"PRIsVALUE"",
5759 rb_id2str(id));
5760 }
5761 else {
5762 return klass;
5763 }
5764 }
5765 else {
5766 return klass;
5767 }
5768}
5769
5770static VALUE
5771vm_check_if_module(ID id, VALUE mod)
5772{
5773 if (!RB_TYPE_P(mod, T_MODULE)) {
5774 return 0;
5775 }
5776 else {
5777 return mod;
5778 }
5779}
5780
5781static VALUE
5782declare_under(ID id, VALUE cbase, VALUE c)
5783{
5784 rb_set_class_path_string(c, cbase, rb_id2str(id));
5785 rb_const_set(cbase, id, c);
5786 return c;
5787}
5788
5789static VALUE
5790vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5791{
5792 /* new class declaration */
5793 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5794 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5795    rb_define_alloc_func(c, rb_get_alloc_func(c));
5796 rb_class_inherited(s, c);
5797 return c;
5798}
5799
5800static VALUE
5801vm_declare_module(ID id, VALUE cbase)
5802{
5803 /* new module declaration */
5804 return declare_under(id, cbase, rb_module_new());
5805}
5806
5807NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5808static void
5809unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5810{
5811 VALUE name = rb_id2str(id);
5812 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5813 name, type);
5814 VALUE location = rb_const_source_location_at(cbase, id);
5815 if (!NIL_P(location)) {
5816 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5817 " previous definition of %"PRIsVALUE" was here",
5818 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5819 }
5820    rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5821}
5822
5823static VALUE
5824vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5825{
5826 VALUE klass;
5827
5828 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5829 rb_raise(rb_eTypeError,
5830 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5831 rb_obj_class(super));
5832 }
5833
5834 vm_check_if_namespace(cbase);
5835
5836 /* find klass */
5837 rb_autoload_load(cbase, id);
5838
5839 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5840 if (!vm_check_if_class(id, flags, super, klass))
5841 unmatched_redefinition("class", cbase, id, klass);
5842 return klass;
5843 }
5844 else {
5845 return vm_declare_class(id, flags, cbase, super);
5846 }
5847}
5848
5849static VALUE
5850vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5851{
5852 VALUE mod;
5853
5854 vm_check_if_namespace(cbase);
5855 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5856 if (!vm_check_if_module(id, mod))
5857 unmatched_redefinition("module", cbase, id, mod);
5858 return mod;
5859 }
5860 else {
5861 return vm_declare_module(id, cbase);
5862 }
5863}
5864
5865static VALUE
5866vm_find_or_create_class_by_id(ID id,
5867 rb_num_t flags,
5868 VALUE cbase,
5869 VALUE super)
5870{
5871 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5872
5873 switch (type) {
5874 case VM_DEFINECLASS_TYPE_CLASS:
5875 /* classdef returns class scope value */
5876 return vm_define_class(id, flags, cbase, super);
5877
5878 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5879 /* classdef returns class scope value */
5880 return rb_singleton_class(cbase);
5881
5882 case VM_DEFINECLASS_TYPE_MODULE:
5883 /* classdef returns class scope value */
5884 return vm_define_module(id, flags, cbase);
5885
5886 default:
5887 rb_bug("unknown defineclass type: %d", (int)type);
5888 }
5889}
5890
5891static rb_method_visibility_t
5892vm_scope_visibility_get(const rb_execution_context_t *ec)
5893{
5894 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5895
5896 if (!vm_env_cref_by_cref(cfp->ep)) {
5897 return METHOD_VISI_PUBLIC;
5898 }
5899 else {
5900 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5901 }
5902}
5903
5904static int
5905vm_scope_module_func_check(const rb_execution_context_t *ec)
5906{
5907 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5908
5909 if (!vm_env_cref_by_cref(cfp->ep)) {
5910 return FALSE;
5911 }
5912 else {
5913 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5914 }
5915}
5916
5917static void
5918vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5919{
5920 VALUE klass;
5921 rb_method_visibility_t visi;
5922 rb_cref_t *cref = vm_ec_cref(ec);
5923
5924 if (is_singleton) {
5925 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5926 visi = METHOD_VISI_PUBLIC;
5927 }
5928 else {
5929 klass = CREF_CLASS_FOR_DEFINITION(cref);
5930 visi = vm_scope_visibility_get(ec);
5931 }
5932
5933 if (NIL_P(klass)) {
5934 rb_raise(rb_eTypeError, "no class/module to add method");
5935 }
5936
5937 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5938    // Set max_iv_count on the class based on the number of ivar sets in its initialize method
5939 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5940 RCLASS_WRITE_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
5941 }
5942
5943 if (!is_singleton && vm_scope_module_func_check(ec)) {
5944 klass = rb_singleton_class(klass);
5945 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5946 }
5947}
5948
5949static VALUE
5950vm_invokeblock_i(struct rb_execution_context_struct *ec,
5951 struct rb_control_frame_struct *reg_cfp,
5952 struct rb_calling_info *calling)
5953{
5954 const struct rb_callinfo *ci = calling->cd->ci;
5955 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5956
5957 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5958 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5959 }
5960 else {
5961 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5962 }
5963}
5964
5965enum method_explorer_type {
5966 mexp_search_method,
5967 mexp_search_invokeblock,
5968 mexp_search_super,
5969};
5970
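/* Common tail of the send-family instructions: builds the rb_calling_info,
 * resolves the callee according to method_explorer (inline-cached method
 * search, super search, or block invocation), and invokes it through the
 * call cache. */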
5971static inline VALUE
5972vm_sendish(
5973 struct rb_execution_context_struct *ec,
5974 struct rb_control_frame_struct *reg_cfp,
5975 struct rb_call_data *cd,
5976 VALUE block_handler,
5977 enum method_explorer_type method_explorer
5978) {
5979 VALUE val = Qundef;
5980 const struct rb_callinfo *ci = cd->ci;
5981 const struct rb_callcache *cc;
5982 int argc = vm_ci_argc(ci);
5983 VALUE recv = TOPN(argc);
5984 struct rb_calling_info calling = {
5985 .block_handler = block_handler,
5986 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5987 .recv = recv,
5988 .argc = argc,
5989 .cd = cd,
5990 };
5991
5992 switch (method_explorer) {
5993 case mexp_search_method:
5994 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5995 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5996 break;
5997 case mexp_search_super:
5998 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5999 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6000 break;
6001 case mexp_search_invokeblock:
6002 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6003 break;
6004 }
6005 return val;
6006}
6007
6008VALUE
6009rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6010{
6011 stack_check(ec);
6012
6013 struct rb_forwarding_call_data adjusted_cd;
6014 struct rb_callinfo adjusted_ci;
6015
6016 VALUE bh;
6017 VALUE val;
6018
6019 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6020 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6021
6022 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6023
6024 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6025 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6026 }
6027 }
6028 else {
6029 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6030 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6031 }
6032
6033 VM_EXEC(ec, val);
6034 return val;
6035}
6036
6037VALUE
6038rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6039{
6040 stack_check(ec);
6041 VALUE bh = VM_BLOCK_HANDLER_NONE;
6042 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6043 VM_EXEC(ec, val);
6044 return val;
6045}
6046
6047VALUE
6048rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6049{
6050 stack_check(ec);
6051 struct rb_forwarding_call_data adjusted_cd;
6052 struct rb_callinfo adjusted_ci;
6053
6054 VALUE bh;
6055 VALUE val;
6056
6057 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6058 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6059
6060 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6061
6062 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6063 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6064 }
6065 }
6066 else {
6067 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6068 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6069 }
6070
6071 VM_EXEC(ec, val);
6072 return val;
6073}
6074
6075VALUE
6076rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6077{
6078 stack_check(ec);
6079 VALUE bh = VM_BLOCK_HANDLER_NONE;
6080 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6081 VM_EXEC(ec, val);
6082 return val;
6083}
6084
6085/* object.c */
6086VALUE rb_nil_to_s(VALUE);
6087VALUE rb_true_to_s(VALUE);
6088VALUE rb_false_to_s(VALUE);
6089/* numeric.c */
6090VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6091VALUE rb_fix_to_s(VALUE);
6092/* variable.c */
6093VALUE rb_mod_to_s(VALUE);
6094VALUE rb_mod_name(VALUE);
6095
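/* Fast path for the objtostring instruction (string interpolation): when the
 * receiver's to_s is known to be the default definition, produce the string
 * directly without a full method dispatch. Returning Qundef makes the
 * instruction fall back to a normal to_s call. */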
6096static VALUE
6097vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6098{
6099 int type = TYPE(recv);
6100 if (type == T_STRING) {
6101 return recv;
6102 }
6103
6104 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6105
6106 switch (type) {
6107 case T_SYMBOL:
6108 if (check_method_basic_definition(vm_cc_cme(cc))) {
6109 // rb_sym_to_s() allocates a mutable string, but since we are only
6110 // going to use this string for interpolation, it's fine to use the
6111 // frozen string.
6112 return rb_sym2str(recv);
6113 }
6114 break;
6115 case T_MODULE:
6116 case T_CLASS:
6117 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6118 // rb_mod_to_s() allocates a mutable string, but since we are only
6119 // going to use this string for interpolation, it's fine to use the
6120 // frozen string.
6121 VALUE val = rb_mod_name(recv);
6122 if (NIL_P(val)) {
6123 val = rb_mod_to_s(recv);
6124 }
6125 return val;
6126 }
6127 break;
6128 case T_NIL:
6129 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6130 return rb_nil_to_s(recv);
6131 }
6132 break;
6133 case T_TRUE:
6134 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6135 return rb_true_to_s(recv);
6136 }
6137 break;
6138 case T_FALSE:
6139 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6140 return rb_false_to_s(recv);
6141 }
6142 break;
6143 case T_FIXNUM:
6144 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6145 return rb_fix_to_s(recv);
6146 }
6147 break;
6148 }
6149 return Qundef;
6150}
6151
6152static VALUE
6153vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6154{
6155 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6156 return ary;
6157 }
6158 else {
6159 return Qundef;
6160 }
6161}
6162
6163static VALUE
6164vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6165{
6166 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6167 return hash;
6168 }
6169 else {
6170 return Qundef;
6171 }
6172}
6173
6174static VALUE
6175vm_opt_str_freeze(VALUE str, int bop, ID id)
6176{
6177 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6178 return str;
6179 }
6180 else {
6181 return Qundef;
6182 }
6183}
6184
6185/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6186#define id_cmp idCmp
6187
6188static VALUE
6189vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6190{
6191 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6192 return rb_ary_includes(ary, target);
6193 }
6194 else {
6195 VALUE args[1] = {target};
6196
6197 // duparray
6198 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6199 VALUE dupary = rb_ary_resurrect(ary);
6200
6201 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6202 }
6203}
6204
6205VALUE
6206rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6207{
6208 return vm_opt_duparray_include_p(ec, ary, target);
6209}
6210
6211static VALUE
6212vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6213{
6214 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6215 if (num == 0) {
6216 return Qnil;
6217 }
6218 else {
6219 VALUE result = *ptr;
6220 rb_snum_t i = num - 1;
6221 while (i-- > 0) {
6222 const VALUE v = *++ptr;
6223 if (OPTIMIZED_CMP(v, result) > 0) {
6224 result = v;
6225 }
6226 }
6227 return result;
6228 }
6229 }
6230 else {
6231 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6232 }
6233}
6234
6235VALUE
6236rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6237{
6238 return vm_opt_newarray_max(ec, num, ptr);
6239}
6240
6241static VALUE
6242vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6243{
6244 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6245 if (num == 0) {
6246 return Qnil;
6247 }
6248 else {
6249 VALUE result = *ptr;
6250 rb_snum_t i = num - 1;
6251 while (i-- > 0) {
6252 const VALUE v = *++ptr;
6253 if (OPTIMIZED_CMP(v, result) < 0) {
6254 result = v;
6255 }
6256 }
6257 return result;
6258 }
6259 }
6260 else {
6261 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6262 }
6263}
6264
6265VALUE
6266rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6267{
6268 return vm_opt_newarray_min(ec, num, ptr);
6269}
6270
6271static VALUE
6272vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6273{
6274 // If Array#hash is _not_ monkeypatched, use the optimized call
6275 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6276 return rb_ary_hash_values(num, ptr);
6277 }
6278 else {
6279 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6280 }
6281}
6282
6283VALUE
6284rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6285{
6286 return vm_opt_newarray_hash(ec, num, ptr);
6287}
6288
6289VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6290VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6291
6292static VALUE
6293vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6294{
6295 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6296 struct RArray fake_ary;
6297 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6298 return rb_ary_includes(ary, target);
6299 }
6300 else {
6301 VALUE args[1] = {target};
6302 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6303 }
6304}
6305
6306VALUE
6307rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6308{
6309 return vm_opt_newarray_include_p(ec, num, ptr, target);
6310}
6311
6312static VALUE
6313vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6314{
6315 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6316 struct RArray fake_ary;
6317 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6318 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6319 }
6320 else {
6321 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6322        // Set up an argument array with room for the keyword hash.
6323 VALUE args[2];
6324 args[0] = fmt;
6325 int kw_splat = RB_NO_KEYWORDS;
6326 int argc = 1;
6327
6328 if (!UNDEF_P(buffer)) {
6329 args[1] = rb_hash_new_with_size(1);
6330 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6331 kw_splat = RB_PASS_KEYWORDS;
6332 argc++;
6333 }
6334
6335 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6336 }
6337}
6338
6339VALUE
6340rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6341{
6342 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6343}
6344
6345VALUE
6346rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6347{
6348 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6349}
6350
6351#undef id_cmp
6352
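/* Records that inline cache `ic` depends on the constant name `id`: the VM
 * keeps an ID -> set-of-ICs table (vm->constant_cache) so that redefining or
 * removing a constant invalidates exactly the caches that mention it. */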
6353static void
6354vm_track_constant_cache(ID id, void *ic)
6355{
6356 rb_vm_t *vm = GET_VM();
6357 struct rb_id_table *const_cache = vm->constant_cache;
6358 VALUE lookup_result;
6359 set_table *ics;
6360
6361 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6362 ics = (set_table *)lookup_result;
6363 }
6364 else {
6365 ics = set_init_numtable();
6366 rb_id_table_insert(const_cache, id, (VALUE)ics);
6367 }
6368
6369    /* The call below to set_insert could allocate, which could trigger a GC.
6370     * If it triggers a GC, it may free an iseq that also holds a cache to this
6371     * constant. If that iseq is the last iseq with a cache to this constant, then
6372     * it will free this set table, which would cause a use-after-free during this
6373     * set_insert.
6374     *
6375     * To fix this issue, we store the ID that is currently being inserted
6376     * and, in remove_from_constant_cache, we don't free the set table for the ID
6377     * equal to this one.
6378     *
6379     * See [Bug #20921].
6380     */
6381 vm->inserting_constant_cache_id = id;
6382
6383 set_insert(ics, (st_data_t)ic);
6384
6385 vm->inserting_constant_cache_id = (ID)0;
6386}
6387
6388static void
6389vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6390{
6391 RB_VM_LOCK_ENTER();
6392
6393 for (int i = 0; segments[i]; i++) {
6394 ID id = segments[i];
6395 if (id == idNULL) continue;
6396 vm_track_constant_cache(id, ic);
6397 }
6398
6399 RB_VM_LOCK_LEAVE();
6400}
6401
6402// For JIT inlining
6403static inline bool
6404vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6405{
6406 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6407 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6408
6409 return (ic_cref == NULL || // no need to check CREF
6410 ic_cref == vm_get_cref(reg_ep));
6411 }
6412 return false;
6413}
6414
6415static bool
6416vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6417{
6418 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6419 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6420}
6421
6422// YJIT needs this function to never allocate and never raise
6423bool
6424rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6425{
6426 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6427}
6428
6429static void
6430vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6431{
6432 if (ruby_vm_const_missing_count > 0) {
6433 ruby_vm_const_missing_count = 0;
6434 ic->entry = NULL;
6435 return;
6436 }
6437
6438 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6439 RB_OBJ_WRITE(ice, &ice->value, val);
6440 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6441 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6442 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6443
6444 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6445 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6446 rb_yjit_constant_ic_update(iseq, ic, pos);
6447}
6448
6449VALUE
6450rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6451{
6452 VALUE val;
6453 const ID *segments = ic->segments;
6454 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6455 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6456 val = ice->value;
6457
6458 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6459 }
6460 else {
6461 ruby_vm_constant_cache_misses++;
6462 val = vm_get_ev_const_chain(ec, segments);
6463 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6464        // Undo the PC increment to get the address of this instruction
6465 // INSN_ATTR(width) == 2
6466 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6467 }
6468 return val;
6469}
6470
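/* Implements the `once` instruction (e.g. a //o regexp literal): the first
 * thread to arrive runs the block under rb_ensure and publishes the value;
 * other threads spin (checking interrupts) until it finishes; re-entry from
 * the running thread itself simply executes the block again. */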
6471static VALUE
6472vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6473{
6474 rb_thread_t *th = rb_ec_thread_ptr(ec);
6475 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6476
6477 again:
6478 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6479 return is->once.value;
6480 }
6481 else if (is->once.running_thread == NULL) {
6482 VALUE val;
6483 is->once.running_thread = th;
6484 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6485 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6486 /* is->once.running_thread is cleared by vm_once_clear() */
6487 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6488 return val;
6489 }
6490 else if (is->once.running_thread == th) {
6491 /* recursive once */
6492 return vm_once_exec((VALUE)iseq);
6493 }
6494 else {
6495 /* waiting for finish */
6496 RUBY_VM_CHECK_INTS(ec);
6497        rb_thread_schedule();
6498 goto again;
6499 }
6500}
6501
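/* Fast case/when dispatch: looks the runtime key up in the compile-time
 * CDHASH and returns the branch offset, or else_offset on a miss. Integral
 * Floats are converted to Integer keys so that e.g. `when 1` can match 1.0.
 * Returns 0 to fall back to the generic checkmatch sequence when a relevant
 * #=== has been redefined or the key type is not optimizable. */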
6502static OFFSET
6503vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6504{
6505 switch (OBJ_BUILTIN_TYPE(key)) {
6506 case -1:
6507 case T_FLOAT:
6508 case T_SYMBOL:
6509 case T_BIGNUM:
6510 case T_STRING:
6511 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6512 SYMBOL_REDEFINED_OP_FLAG |
6513 INTEGER_REDEFINED_OP_FLAG |
6514 FLOAT_REDEFINED_OP_FLAG |
6515 NIL_REDEFINED_OP_FLAG |
6516 TRUE_REDEFINED_OP_FLAG |
6517 FALSE_REDEFINED_OP_FLAG |
6518 STRING_REDEFINED_OP_FLAG)) {
6519 st_data_t val;
6520 if (RB_FLOAT_TYPE_P(key)) {
6521 double kval = RFLOAT_VALUE(key);
6522 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6523 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6524 }
6525 }
6526 if (rb_hash_stlike_lookup(hash, key, &val)) {
6527 return FIX2LONG((VALUE)val);
6528 }
6529 else {
6530 return else_offset;
6531 }
6532 }
6533 }
6534 return 0;
6535}
6536
6537NORETURN(static void
6538 vm_stack_consistency_error(const rb_execution_context_t *ec,
6539 const rb_control_frame_t *,
6540 const VALUE *));
6541static void
6542vm_stack_consistency_error(const rb_execution_context_t *ec,
6543 const rb_control_frame_t *cfp,
6544 const VALUE *bp)
6545{
6546 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6547 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6548 static const char stack_consistency_error[] =
6549 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6550#if defined RUBY_DEVEL
6551 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6552 rb_str_cat_cstr(mesg, "\n");
6553 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6554    rb_bug("%s", StringValueCStr(mesg));
6555#else
6556 rb_bug(stack_consistency_error, nsp, nbp);
6557#endif
6558}
6559
6560static VALUE
6561vm_opt_plus(VALUE recv, VALUE obj)
6562{
6563 if (FIXNUM_2_P(recv, obj) &&
6564 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6565 return rb_fix_plus_fix(recv, obj);
6566 }
6567 else if (FLONUM_2_P(recv, obj) &&
6568 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6569 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6570 }
6571 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6572 return Qundef;
6573 }
6574 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6575 RBASIC_CLASS(obj) == rb_cFloat &&
6576 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6577 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6578 }
6579 else if (RBASIC_CLASS(recv) == rb_cString &&
6580 RBASIC_CLASS(obj) == rb_cString &&
6581 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6582 return rb_str_opt_plus(recv, obj);
6583 }
6584 else if (RBASIC_CLASS(recv) == rb_cArray &&
6585 RBASIC_CLASS(obj) == rb_cArray &&
6586 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6587 return rb_ary_plus(recv, obj);
6588 }
6589 else {
6590 return Qundef;
6591 }
6592}
6593
6594static VALUE
6595vm_opt_minus(VALUE recv, VALUE obj)
6596{
6597 if (FIXNUM_2_P(recv, obj) &&
6598 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6599 return rb_fix_minus_fix(recv, obj);
6600 }
6601 else if (FLONUM_2_P(recv, obj) &&
6602 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6603 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6604 }
6605 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6606 return Qundef;
6607 }
6608 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6609 RBASIC_CLASS(obj) == rb_cFloat &&
6610 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6611 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6612 }
6613 else {
6614 return Qundef;
6615 }
6616}
6617
6618static VALUE
6619vm_opt_mult(VALUE recv, VALUE obj)
6620{
6621 if (FIXNUM_2_P(recv, obj) &&
6622 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6623 return rb_fix_mul_fix(recv, obj);
6624 }
6625 else if (FLONUM_2_P(recv, obj) &&
6626 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6627 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6628 }
6629 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6630 return Qundef;
6631 }
6632 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6633 RBASIC_CLASS(obj) == rb_cFloat &&
6634 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6635 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6636 }
6637 else {
6638 return Qundef;
6639 }
6640}
6641
6642static VALUE
6643vm_opt_div(VALUE recv, VALUE obj)
6644{
6645 if (FIXNUM_2_P(recv, obj) &&
6646 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6647 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6648 }
6649 else if (FLONUM_2_P(recv, obj) &&
6650 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6651 return rb_flo_div_flo(recv, obj);
6652 }
6653 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6654 return Qundef;
6655 }
6656 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6657 RBASIC_CLASS(obj) == rb_cFloat &&
6658 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6659 return rb_flo_div_flo(recv, obj);
6660 }
6661 else {
6662 return Qundef;
6663 }
6664}
6665
6666static VALUE
6667vm_opt_mod(VALUE recv, VALUE obj)
6668{
6669 if (FIXNUM_2_P(recv, obj) &&
6670 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6671 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6672 }
6673 else if (FLONUM_2_P(recv, obj) &&
6674 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6675 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6676 }
6677 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6678 return Qundef;
6679 }
6680 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6681 RBASIC_CLASS(obj) == rb_cFloat &&
6682 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6683 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6684 }
6685 else {
6686 return Qundef;
6687 }
6688}
6689
6690static VALUE
6691vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6692{
6693 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6694 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6695
6696 if (!UNDEF_P(val)) {
6697 return RBOOL(!RTEST(val));
6698 }
6699 }
6700
6701 return Qundef;
6702}
6703
6704static VALUE
6705vm_opt_lt(VALUE recv, VALUE obj)
6706{
6707 if (FIXNUM_2_P(recv, obj) &&
6708 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6709 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6710 }
6711 else if (FLONUM_2_P(recv, obj) &&
6712 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6713 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6714 }
6715 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6716 return Qundef;
6717 }
6718 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6719 RBASIC_CLASS(obj) == rb_cFloat &&
6720 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6721 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6722 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6723 }
6724 else {
6725 return Qundef;
6726 }
6727}
6728
6729static VALUE
6730vm_opt_le(VALUE recv, VALUE obj)
6731{
6732 if (FIXNUM_2_P(recv, obj) &&
6733 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6734 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6735 }
6736 else if (FLONUM_2_P(recv, obj) &&
6737 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6738 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6739 }
6740 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6741 return Qundef;
6742 }
6743 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6744 RBASIC_CLASS(obj) == rb_cFloat &&
6745 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6746 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6747 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6748 }
6749 else {
6750 return Qundef;
6751 }
6752}
6753
6754static VALUE
6755vm_opt_gt(VALUE recv, VALUE obj)
6756{
6757 if (FIXNUM_2_P(recv, obj) &&
6758 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6759 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6760 }
6761 else if (FLONUM_2_P(recv, obj) &&
6762 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6763 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6764 }
6765 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6766 return Qundef;
6767 }
6768 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6769 RBASIC_CLASS(obj) == rb_cFloat &&
6770 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6771 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6772 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6773 }
6774 else {
6775 return Qundef;
6776 }
6777}
6778
6779static VALUE
6780vm_opt_ge(VALUE recv, VALUE obj)
6781{
6782 if (FIXNUM_2_P(recv, obj) &&
6783 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6784 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6785 }
6786 else if (FLONUM_2_P(recv, obj) &&
6787 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6788 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6789 }
6790 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6791 return Qundef;
6792 }
6793 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6794 RBASIC_CLASS(obj) == rb_cFloat &&
6795 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6796 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6797 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6798 }
6799 else {
6800 return Qundef;
6801 }
6802}
6803
6804
6805static VALUE
6806vm_opt_ltlt(VALUE recv, VALUE obj)
6807{
6808 if (SPECIAL_CONST_P(recv)) {
6809 return Qundef;
6810 }
6811 else if (RBASIC_CLASS(recv) == rb_cString &&
6812 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6813 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6814 return rb_str_buf_append(recv, obj);
6815 }
6816 else {
6817 return rb_str_concat(recv, obj);
6818 }
6819 }
6820 else if (RBASIC_CLASS(recv) == rb_cArray &&
6821 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6822 return rb_ary_push(recv, obj);
6823 }
6824 else {
6825 return Qundef;
6826 }
6827}
6828
6829static VALUE
6830vm_opt_and(VALUE recv, VALUE obj)
6831{
6832 // If recv and obj are both fixnums, then the bottom tag bit
6833 // will be 1 on both. 1 & 1 == 1, so the result value will also
6834 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6835 // will be 0, and we return Qundef.
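    // e.g. INT2FIX(5) == 0b1011 and INT2FIX(3) == 0b0111, so
    // recv & obj == 0b0011 == INT2FIX(1), matching 5 & 3 == 1.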
6836 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6837
6838 if (FIXNUM_P(ret) &&
6839 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6840 return ret;
6841 }
6842 else {
6843 return Qundef;
6844 }
6845}
6846
6847static VALUE
6848vm_opt_or(VALUE recv, VALUE obj)
6849{
6850 if (FIXNUM_2_P(recv, obj) &&
6851 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6852 return recv | obj;
6853 }
6854 else {
6855 return Qundef;
6856 }
6857}
6858
6859static VALUE
6860vm_opt_aref(VALUE recv, VALUE obj)
6861{
6862 if (SPECIAL_CONST_P(recv)) {
6863 if (FIXNUM_2_P(recv, obj) &&
6864 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6865 return rb_fix_aref(recv, obj);
6866 }
6867 return Qundef;
6868 }
6869 else if (RBASIC_CLASS(recv) == rb_cArray &&
6870 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6871 if (FIXNUM_P(obj)) {
6872 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6873 }
6874 else {
6875 return rb_ary_aref1(recv, obj);
6876 }
6877 }
6878 else if (RBASIC_CLASS(recv) == rb_cHash &&
6879 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6880 return rb_hash_aref(recv, obj);
6881 }
6882 else {
6883 return Qundef;
6884 }
6885}
6886
6887static VALUE
6888vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6889{
6890 if (SPECIAL_CONST_P(recv)) {
6891 return Qundef;
6892 }
6893 else if (RBASIC_CLASS(recv) == rb_cArray &&
6894 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6895 FIXNUM_P(obj)) {
6896 rb_ary_store(recv, FIX2LONG(obj), set);
6897 return set;
6898 }
6899 else if (RBASIC_CLASS(recv) == rb_cHash &&
6900 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6901 rb_hash_aset(recv, obj, set);
6902 return set;
6903 }
6904 else {
6905 return Qundef;
6906 }
6907}
6908
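/* Specialization for hash[str] with a string literal key (opt_aref_with):
 * only safe when Hash#[] is unredefined, the hash does not compare by
 * identity, and it has no default proc (which could observe the key). */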
6909static VALUE
6910vm_opt_aref_with(VALUE recv, VALUE key)
6911{
6912 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6913 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6914 rb_hash_compare_by_id_p(recv) == Qfalse &&
6915 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6916 return rb_hash_aref(recv, key);
6917 }
6918 else {
6919 return Qundef;
6920 }
6921}
6922
6923VALUE
6924rb_vm_opt_aref_with(VALUE recv, VALUE key)
6925{
6926 return vm_opt_aref_with(recv, key);
6927}
6928
6929static VALUE
6930vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6931{
6932 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6933 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6934 rb_hash_compare_by_id_p(recv) == Qfalse) {
6935 return rb_hash_aset(recv, key, val);
6936 }
6937 else {
6938 return Qundef;
6939 }
6940}
6941
6942VALUE
6943rb_vm_opt_aset_with(VALUE recv, VALUE key, VALUE value)
6944{
6945 return vm_opt_aset_with(recv, key, value);
6946}
6947
6948static VALUE
6949vm_opt_length(VALUE recv, int bop)
6950{
6951 if (SPECIAL_CONST_P(recv)) {
6952 return Qundef;
6953 }
6954 else if (RBASIC_CLASS(recv) == rb_cString &&
6955 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6956 if (bop == BOP_EMPTY_P) {
6957 return LONG2NUM(RSTRING_LEN(recv));
6958 }
6959 else {
6960 return rb_str_length(recv);
6961 }
6962 }
6963 else if (RBASIC_CLASS(recv) == rb_cArray &&
6964 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6965 return LONG2NUM(RARRAY_LEN(recv));
6966 }
6967 else if (RBASIC_CLASS(recv) == rb_cHash &&
6968 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6969 return INT2FIX(RHASH_SIZE(recv));
6970 }
6971 else {
6972 return Qundef;
6973 }
6974}
6975
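/* empty? is answered via vm_opt_length: for BOP_EMPTY_P a byte length
 * (RSTRING_LEN) is sufficient, since only the comparison with zero matters. */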
6976static VALUE
6977vm_opt_empty_p(VALUE recv)
6978{
6979 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6980 case Qundef: return Qundef;
6981 case INT2FIX(0): return Qtrue;
6982 default: return Qfalse;
6983 }
6984}
6985
6986VALUE rb_false(VALUE obj);
6987
6988static VALUE
6989vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6990{
6991 if (NIL_P(recv) &&
6992 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6993 return Qtrue;
6994 }
6995 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6996 return Qfalse;
6997 }
6998 else {
6999 return Qundef;
7000 }
7001}
7002
7003static VALUE
7004fix_succ(VALUE x)
7005{
7006 switch (x) {
7007 case ~0UL:
7008 /* 0xFFFF_FFFF == INT2FIX(-1)
7009 * `-1.succ` is of course 0. */
7010 return INT2FIX(0);
7011 case RSHIFT(~0UL, 1):
7012 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7013 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7014 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7015 default:
7016 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7017 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7018 * == lx*2 + ly*2 + 1
7019 * == (lx*2+1) + (ly*2+1) - 1
7020 * == x + y - 1
7021 *
7022 * Here, if we put y := INT2FIX(1):
7023 *
7024 * == x + INT2FIX(1) - 1
7025 * == x + 2 .
7026 */
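        /* Concretely: INT2FIX(3) == 7, and 7 + 2 == 9 == INT2FIX(4). */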
7027 return x + 2;
7028 }
7029}
7030
7031static VALUE
7032vm_opt_succ(VALUE recv)
7033{
7034 if (FIXNUM_P(recv) &&
7035 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7036 return fix_succ(recv);
7037 }
7038 else if (SPECIAL_CONST_P(recv)) {
7039 return Qundef;
7040 }
7041 else if (RBASIC_CLASS(recv) == rb_cString &&
7042 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7043 return rb_str_succ(recv);
7044 }
7045 else {
7046 return Qundef;
7047 }
7048}
7049
7050static VALUE
7051vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7052{
7053 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7054 return RBOOL(!RTEST(recv));
7055 }
7056 else {
7057 return Qundef;
7058 }
7059}
7060
7061static VALUE
7062vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7063{
7064 if (SPECIAL_CONST_P(recv)) {
7065 return Qundef;
7066 }
7067 else if (RBASIC_CLASS(recv) == rb_cString &&
7068 CLASS_OF(obj) == rb_cRegexp &&
7069 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7070 return rb_reg_match(obj, recv);
7071 }
7072 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7073 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7074 return rb_reg_match(recv, obj);
7075 }
7076 else {
7077 return Qundef;
7078 }
7079}
7080
7081rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7082
7083NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7084
7085static inline void
7086vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7087 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7088 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7089{
7090 rb_event_flag_t event = pc_events & target_event;
7091 VALUE self = GET_SELF();
7092
7093 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7094
7095 if (event & global_hooks->events) {
7096 /* increment PC because source line is calculated with PC-1 */
7097 reg_cfp->pc++;
7098 vm_dtrace(event, ec);
7099 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7100 reg_cfp->pc--;
7101 }
7102
7103 // Load here since global hook above can add and free local hooks
7104 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7105 if (local_hooks != NULL) {
7106 if (event & local_hooks->events) {
7107 /* increment PC because source line is calculated with PC-1 */
7108 reg_cfp->pc++;
7109 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7110 reg_cfp->pc--;
7111 }
7112 }
7113}
7114
7115#define VM_TRACE_HOOK(target_event, val) do { \
7116 if ((pc_events & (target_event)) & enabled_flags) { \
7117 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7118 } \
7119} while (0)
7120
7121static VALUE
7122rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7123{
7124 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7125 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7126 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7127}
7128
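/* Per-instruction trace dispatch: computes the events flagged at the current
 * PC, merges global, iseq-local and bmethod-local hooks, and fires them in
 * order; for bmethod frames, b_call/b_return additionally fire as the
 * method-level call/return events. */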
7129static void
7130vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7131{
7132 const VALUE *pc = reg_cfp->pc;
7133 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7134 rb_event_flag_t global_events = enabled_flags;
7135
7136 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7137 return;
7138 }
7139 else {
7140 const rb_iseq_t *iseq = reg_cfp->iseq;
7141 VALUE iseq_val = (VALUE)iseq;
7142 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7143 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7144 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7145 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7146 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7147 rb_hook_list_t *bmethod_local_hooks = NULL;
7148 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7149 rb_event_flag_t bmethod_local_events = 0;
7150 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7151 enabled_flags |= iseq_local_events;
7152
7153 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7154
7155 if (bmethod_frame) {
7156 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7157 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7158 bmethod_local_hooks = me->def->body.bmethod.hooks;
7159 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7160 if (bmethod_local_hooks) {
7161 bmethod_local_events = bmethod_local_hooks->events;
7162 }
7163 }
7164
7165
7166 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7167#if 0
7168 /* disable trace */
7169 /* TODO: incomplete */
7170 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7171#else
7172            /* do not disable tracing here: the overhead of re-enabling
7173             * it later outweighs the benefit
7174             */
7175#endif
7176 return;
7177 }
7178 else if (ec->trace_arg != NULL) {
7179 /* already tracing */
7180 return;
7181 }
7182 else {
7183 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7184 /* Note, not considering iseq local events here since the same
7185 * iseq could be used in multiple bmethods. */
7186 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7187
7188 if (0) {
7189 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7190 (int)pos,
7191 (int)pc_events,
7192 RSTRING_PTR(rb_iseq_path(iseq)),
7193 (int)rb_iseq_line_no(iseq, pos),
7194 RSTRING_PTR(rb_iseq_label(iseq)));
7195 }
7196 VM_ASSERT(reg_cfp->pc == pc);
7197 VM_ASSERT(pc_events != 0);
7198
7199 /* check traces */
7200 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7201 /* b_call instruction running as a method. Fire call event. */
7202 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7203 }
7204            VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7205 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7206 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7207 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7208 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7209 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7210 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7211 /* b_return instruction running as a method. Fire return event. */
7212 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7213 }
7214
7215 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7216 // We need the pointer to stay valid in case compaction happens in a trace hook.
7217 //
7218 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7219 // storage for `rb_method_definition_t` is not on the GC heap.
7220 RB_GC_GUARD(iseq_val);
7221 }
7222 }
7223}
7224#undef VM_TRACE_HOOK
7225
7226#if VM_CHECK_MODE > 0
7227NORETURN( NOINLINE( COLDFUNC
7228void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7229
7230void
7231Init_vm_stack_canary(void)
7232{
7233 /* This has to be called _after_ our PRNG is properly set up. */
7234 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7235 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7236
7237 vm_stack_canary_was_born = true;
7238 VM_ASSERT(n == 0);
7239}
7240
7241void
7242rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7243{
7244 /* Because a method has already been called, why not call
7245 * another one. */
7246 const char *insn = rb_insns_name(i);
7247 VALUE inspection = rb_inspect(c);
7248 const char *str = StringValueCStr(inspection);
7249
7250 rb_bug("dead canary found at %s: %s", insn, str);
7251}
7252
7253#else
7254void Init_vm_stack_canary(void) { /* nothing to do */ }
7255#endif
7256
7257
7258/* a part of the following code is generated by this ruby script:
7259
726016.times{|i|
7261 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7262 typedef_args.prepend(", ") if i != 0
7263 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7264 call_args.prepend(", ") if i != 0
7265 puts %Q{
7266static VALUE
7267builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7268{
7269 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7270 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7271}}
7272}
7273
7274puts
7275puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
727616.times{|i|
7277 puts " builtin_invoker#{i},"
7278}
7279puts "};"
7280*/
7281
7282static VALUE
7283builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7284{
7285 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7286 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7287}
7288
7289static VALUE
7290builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7291{
7292 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7293 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7294}
7295
static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

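/*
 * bf->func_ptr is stored type-erased, and C has no portable way to call
 * a function pointer with an argument list whose length is only known at
 * runtime. The builtin_invoker0..builtin_invoker15 trampolines above
 * therefore cast the pointer back to its exact N-argument signature and
 * spread argv into positional arguments; lookup_builtin_invoker() below
 * picks the trampoline matching a builtin's arity.
 */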
typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    /* no bounds check: a builtin's arity is fixed when it is registered
     * and cannot exceed 15, the largest trampoline defined above */
    return invokers[argc];
}

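/*
 * Dispatches a builtin through the arity-matched trampoline. When the
 * enclosing iseq is annotated as leaf (`Primitive.attr! :leaf`), a
 * canary is planted on the VM stack before the call and checked
 * afterwards: a leaf builtin promises not to grow the VM stack, and a
 * clobbered canary reports the violation against BIN(invokebuiltin).
 */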
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

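/*
 * Used when a builtin simply forwards the frame's own parameters (the
 * opt_invokebuiltin_delegate instructions): rather than copying the
 * arguments, this points argv straight into the frame's local-variable
 * area. Locals sit immediately below the VM_ENV_DATA_SIZE environment
 * slots that `ep` addresses, so the first local lives at
 * ep - local_table_size - VM_ENV_DATA_SIZE + 1, and `start_index`
 * skips any leading locals that are not being delegated.
 */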
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

// for __builtin_inline!(): C code inlined from Ruby reads the caller's
// Ruby locals through this helper, indexing off the current frame's ep.

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}
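
/*
 * Illustrative sketch (hypothetical, not verbatim generator output):
 * for Ruby source such as
 *
 *     def succ_of(x) = Primitive.cexpr! 'rb_int_plus(x, INT2FIX(1))'
 *
 * the builtin loader emits a C function that fetches `x` from the
 * calling Ruby frame, conceptually:
 *
 *     VALUE x = rb_vm_lvar_exposed(ec, <ep index of x>);
 *
 * which is why this helper stays non-static even though nothing in
 * this file calls it.
 */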