Ruby 3.5.0dev (2025-06-07 revision 20cf46039a90135b3d9efceabc73b0d41ad257b8)
vm_insnhelper.c (20cf46039a90135b3d9efceabc73b0d41ad257b8)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
/* critical level
 * 0: VM stack overflow, or a machine stack overflow that is about to occur
 * 1: machine stack overflow, but it may be recoverable
 * 2: fatal machine stack overflow
 */
void
rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit > 1) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit == 0);
}
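
/* An illustrative reading of the levels above (not an exhaustive list of
 * call sites): plain VM stack exhaustion uses crit == 0 and builds a full
 * backtrace; a machine stack overflow detected in a restricted context
 * passes crit == 1 to raise without consuming more machine stack; and
 * crit == 2 gives up immediately with the prebuilt fatal exception. */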

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
                  );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting contents this large.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i = 0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
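
/* A sketch of the resulting value stack (sp and ep as stored in the new cfp):
 *
 *   sp[-3-local_size] .. sp[-4]   locals, initialized to Qnil
 *   sp[-3] == ep[-2]              cref_or_me
 *   sp[-2] == ep[-1]              specval (block handler or prev env ptr)
 *   sp[-1] == ep[ 0]              type (ENV_FLAGS); cfp->ep points here
 */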

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

// Pushes a pseudo-frame whose file name is the given fname.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
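
/* Illustrative messages built above:
 *   rb_arity_error_new(2, 1, 1) => "wrong number of arguments (given 2, expected 1)"
 *   rb_arity_error_new(0, 1, 3) => "wrong number of arguments (given 0, expected 1..3)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS)
 *                               => "wrong number of arguments (given 0, expected 1+)"
 */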

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
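
/* Roughly what `&blk` in a method signature observes: no block => nil, an
 * iseq/ifunc block handler is wrapped into a fresh Proc, a Symbol handler
 * acts like Symbol#to_proc, and a Proc handler is passed through as-is. */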

/* svar */

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
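
/* Operand encoding sketch: type == 0 reads the svar slot selected by key
 * ($_ via VM_SVAR_LASTLINE, $~ via VM_SVAR_BACKREF, ...). Otherwise the low
 * bit distinguishes named back-refs from numbered ones: with the bit set,
 * type >> 1 is the character of the variable ('&' => $&, '`' => $`,
 * '\'' => $', '+' => $+); with it clear, type >> 1 is n for $n. */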

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

void
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
{
    rb_cref_t *new_cref;

    while (cref) {
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            *new_cref_ptr = new_cref;
            return;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        *new_cref_ptr = new_cref;
        new_cref_ptr = &new_cref->next;
    }
    *new_cref_ptr = NULL;
}
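
/* A reading of the loop above (intent sketch): the chain is rebuilt cref by
 * cref, each copy keeping its original class, until the first cref whose
 * class is old_klass; that one is recreated pointing at new_klass and the
 * walk stops there, so everything past the replacement stays shared. */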

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
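
/* Illustrative reading: for `A::B::C` the segments array holds the IDs of
 * A, B and C followed by 0; only the first segment may be resolved against
 * the current lexical scope (allow_nil == TRUE), later ones are looked up
 * under the value resolved so far. A leading idNULL encodes the rooted
 * `::A::B` form and starts the search at Object. */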


static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
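
/* A sketch of the rule implemented above: class variables belong to the
 * lexically enclosing non-singleton class/module, so singleton and
 * eval-pushed crefs are skipped, e.g.
 *
 *   class C
 *     @@v = 1
 *     class << self
 *       def read; @@v; end   # resolves against C, not C's singleton class
 *     end
 *   end
 *
 * while reading @@v at the toplevel raises the RuntimeError above. */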

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
#if OPT_IC_FOR_IVAR
    VALUE val = Qundef;
    VALUE *ivar_list;

    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_FIELDS(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            ivar_list = RCLASS_PRIME_FIELDS(obj);
            break;
        }
      default:
        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct gen_fields_tbl *fields_tbl;
            rb_gen_fields_tbl_get(obj, id, &fields_tbl);
            ivar_list = fields_tbl->as.shape.fields;
        }
        else {
            return default_value;
        }
    }

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (rb_shape_too_complex_p(shape_id)) {
            st_table *table = NULL;
            switch (BUILTIN_TYPE(obj)) {
              case T_CLASS:
              case T_MODULE:
                table = (st_table *)RCLASS_FIELDS_HASH(obj);
                break;

              case T_OBJECT:
                table = ROBJECT_FIELDS_HASH(obj);
                break;

              default: {
                struct gen_fields_tbl *fields_tbl;
                if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
                    table = fields_tbl->as.complex.table;
                }
                break;
              }
            }

            if (!table || !st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }

    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
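
/* Inline-cache sketch: the IC (or the CC, for attr_reader) caches a
 * (shape_id, attr_index) pair. A hit means the receiver still has the
 * cached shape, so the read is a single load from ivar_list[index]; a miss
 * searches the shape tree, refills the cache, and falls back to a hash
 * lookup for too-complex shapes. Repeated `@x` reads from objects sharing
 * one shape therefore stay on the fast path. */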

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

        if (!rb_shape_too_complex_p(next_shape_id)) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    struct gen_fields_tbl *fields_tbl = 0;

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        rb_shape_t *dest_shape = RSHAPE(dest_shape_id);

        if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    rb_gen_fields_tbl_get(obj, 0, &fields_tbl);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
    }

    RB_OBJ_WRITE(obj, &fields_tbl->as.shape.fields[index], val);

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                rb_shape_t *dest_shape = RSHAPE(dest_shape_id);
                shape_id_t source_shape_id = dest_shape->parent_id;

                if (shape_id == source_shape_id && dest_shape->edge_name == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(rb_shape_get_next_iv_shape(source_shape_id, id) == dest_shape_id);
                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
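
/* Two fast cases for `@x = val` on a T_OBJECT, mirroring the branches above:
 * a cache hit (the object already has the cached shape: plain indexed store)
 * and a cached transition (the object has the parent shape of dest_shape_id
 * and the edge is `id`: store plus shape bump). Anything else returns Qundef
 * and is handled by vm_setivar_slowpath(), which also performs the frozen
 * check. */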

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i = 0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
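
/* Ruby-level examples of the shapes handled above (illustrative):
 *
 *   a, b     = ary   # num == 2, no splat: pushes ary[1], then ary[0] on top
 *   a, b, *c = ary   # num == 2, splat: pushes the rest array first
 *   *a, b    = ary   # flag & 0x02 (post): pads with nil, then post args, rest
 *
 * A non-array RHS is treated as a one-element array unless it converts via
 * to_ary (rb_check_array_type above). */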

static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);

static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);

static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
#if VM_CHECK_MODE > 0
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = 0;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
    ccs->entries = NULL;

    rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(klass, Qundef, cme);
    return ccs;
}

static void
vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        if (ccs->capa == 0) {
            ccs->capa = 1;
            ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
        }
        else {
            ccs->capa *= 2;
            REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
        }
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}

#if VM_CHECK_MODE > 0
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i=0; i<ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}

static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i=0; i<ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
#endif

const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);

static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
{
    const ID mid = vm_ci_mid(ci);
    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
    struct rb_class_cc_entries *ccs = NULL;
    VALUE ccs_data;

    if (cc_tbl) {
        // CCS data is keyed on method id, so we don't need the method id
        // for doing comparisons in the `for` loop below.
        if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
            ccs = (struct rb_class_cc_entries *)ccs_data;
            const int ccs_len = ccs->len;

            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
                rb_vm_ccs_free(ccs);
                rb_id_table_delete(cc_tbl, mid);
                ccs = NULL;
            }
            else {
                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));

                // We already know the method id is correct because we had
                // to look up the ccs_data by method id. All we need to
                // compare is argc and flag
                unsigned int argc = vm_ci_argc(ci);
                unsigned int flag = vm_ci_flag(ci);

                for (int i=0; i<ccs_len; i++) {
                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;

                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));

                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);

                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
                        VM_ASSERT(ccs_cc->klass == klass);
                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));

                        return ccs_cc;
                    }
                }
            }
        }
    }
    else {
        cc_tbl = rb_id_table_create(2);
        RCLASS_WRITE_CC_TBL(klass, cc_tbl);
    }

    RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);

    const rb_callable_method_entry_t *cme;

    if (ccs) {
        cme = ccs->cme;
        cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;

        VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
    }
    else {
        cme = rb_callable_method_entry(klass, mid);
    }

    VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));

    if (cme == NULL) {
        // undef or not found: can't cache the information
        VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
        return &vm_empty_cc;
    }

    VM_ASSERT(cme == rb_callable_method_entry(klass, mid));

    METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);

    if (ccs == NULL) {
        VM_ASSERT(cc_tbl != NULL);

        if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
            // rb_callable_method_entry() prepares ccs.
            ccs = (struct rb_class_cc_entries *)ccs_data;
        }
        else {
            // TODO: required?
            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
        }
    }

    cme = rb_check_overloaded_cme(cme, ci);

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
    vm_ccs_push(klass, ccs, ci, cc);

    VM_ASSERT(vm_cc_cme(cc) != NULL);
    VM_ASSERT(cme->called_id == mid);
    VM_ASSERT(vm_cc_cme(cc)->called_id == mid);

    return cc;
}
2155
2156const struct rb_callcache *
2157rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2158{
2159 const struct rb_callcache *cc;
2160
2161 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2162
2163 RB_VM_LOCKING() {
2164 cc = vm_search_cc(klass, ci);
2165
2166 VM_ASSERT(cc);
2167 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2168 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2169 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2170 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2171 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2172 }
2173
2174 return cc;
2175}
2176
2177static const struct rb_callcache *
2178vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2179{
2180#if USE_DEBUG_COUNTER
2181 const struct rb_callcache *old_cc = cd->cc;
2182#endif
2183
2184 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2185
2186#if OPT_INLINE_METHOD_CACHE
2187 cd->cc = cc;
2188
2189 const struct rb_callcache *empty_cc = &vm_empty_cc;
2190 if (cd_owner && cc != empty_cc) {
2191 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2192 }
2193
2194#if USE_DEBUG_COUNTER
2195 if (!old_cc || old_cc == empty_cc) {
2196 // empty
2197 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2198 }
2199 else if (old_cc == cc) {
2200 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2201 }
2202 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2203 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2204 }
2205 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2206 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2207 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2208 }
2209 else {
2210 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2211 }
2212#endif
2213#endif // OPT_INLINE_METHOD_CACHE
2214
2215 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2216 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2217
2218 return cc;
2219}
2220
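// An inline cache hit below requires both that cd->cc was filled in for this
// exact class and that its method entry has not been invalidated; failing
// either check falls back to the locked slowpath above, which re-searches
// the method and refills cd->cc.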
2221ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2222static const struct rb_callcache *
2223vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2224{
2225 const struct rb_callcache *cc = cd->cc;
2226
2227#if OPT_INLINE_METHOD_CACHE
2228 if (LIKELY(vm_cc_class_check(cc, klass))) {
2229 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2230 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2231 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2232 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2233 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2234 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2235
2236 return cc;
2237 }
2238 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2239 }
2240 else {
2241 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2242 }
2243#endif
2244
2245 return vm_search_method_slowpath0(cd_owner, cd, klass);
2246}
2247
2248static const struct rb_callcache *
2249vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2250{
2251 VALUE klass = CLASS_OF(recv);
2252 VM_ASSERT(klass != Qfalse);
2253 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2254
2255 return vm_search_method_fastpath(cd_owner, cd, klass);
2256}
2257
2258#if __has_attribute(transparent_union)
2259typedef union {
2260 VALUE (*anyargs)(ANYARGS);
2261 VALUE (*f00)(VALUE);
2262 VALUE (*f01)(VALUE, VALUE);
2263 VALUE (*f02)(VALUE, VALUE, VALUE);
2264 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2265 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2266 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2267 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2268 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2269 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2270 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2271 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2272 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2273 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2274 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2275 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2276 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2277 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2278} __attribute__((__transparent_union__)) cfunc_type;
2279# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2280#else
2281typedef VALUE (*cfunc_type)(ANYARGS);
2282# define make_cfunc_type(f) (cfunc_type)(f)
2283#endif
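// With the transparent union, call sites may pass a function pointer of any
// of the arities above where a cfunc_type is expected, without an explicit
// cast; make_cfunc_type() funnels everything through the .anyargs member.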
2284
2285static inline int
2286check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2287{
2288 if (! me) {
2289 return false;
2290 }
2291 else {
2292 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2293 VM_ASSERT(callable_method_entry_p(me));
2294 VM_ASSERT(me->def);
2295 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2296 return false;
2297 }
2298 else {
2299#if __has_attribute(transparent_union)
2300 return me->def->body.cfunc.func == func.anyargs;
2301#else
2302 return me->def->body.cfunc.func == func;
2303#endif
2304 }
2305 }
2306}
2307
2308static inline int
2309check_method_basic_definition(const rb_callable_method_entry_t *me)
2310{
2311 return me && METHOD_ENTRY_BASIC(me);
2312}
2313
2314static inline int
2315vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2316{
2317 VM_ASSERT(iseq != NULL);
2318 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2319 return check_cfunc(vm_cc_cme(cc), func);
2320}
2321
2322#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2323#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
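// The two macros above shadow the functions of the same name so that callers
// can pass a raw C function pointer; make_cfunc_type() adapts it to
// cfunc_type whether or not the transparent-union form is available.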
2324
2325#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2326
2327static inline bool
2328FIXNUM_2_P(VALUE a, VALUE b)
2329{
2330 /* FIXNUM_P(a) && FIXNUM_P(b)
2331 * == ((a & 1) && (b & 1))
2332 * == a & b & 1 */
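 /* Worked example: the Integers 2 and 3 are the tagged VALUEs 0b101 and
 * 0b111 (only fixnums have the low bit set), so 0b101 & 0b111 & 1 == 1,
 * while any non-fixnum operand has a clear low bit and makes the AND 0. */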
2333 SIGNED_VALUE x = a;
2334 SIGNED_VALUE y = b;
2335 SIGNED_VALUE z = x & y & 1;
2336 return z == 1;
2337}
2338
2339static inline bool
2340FLONUM_2_P(VALUE a, VALUE b)
2341{
2342#if USE_FLONUM
2343 /* FLONUM_P(a) && FLONUM_P(b)
2344 * == ((a & 3) == 2) && ((b & 3) == 2)
2345 * == ! (((a ^ 2) | (b ^ 2)) & 3)
2346 */
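 /* Worked example: a flonum VALUE carries the low tag bits 0b10, so
 * (a ^ 2) zeroes those bits exactly when the tag is present, and
 * ((a ^ 2) | (b ^ 2)) & 3 is 0 iff both operands are flonums. */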
2347 SIGNED_VALUE x = a;
2348 SIGNED_VALUE y = b;
2349 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2350 return !z;
2351#else
2352 return false;
2353#endif
2354}
2355
2356static VALUE
2357opt_equality_specialized(VALUE recv, VALUE obj)
2358{
2359 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2360 goto compare_by_identity;
2361 }
2362 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2363 goto compare_by_identity;
2364 }
2365 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2366 goto compare_by_identity;
2367 }
2368 else if (SPECIAL_CONST_P(recv)) {
2369 //
2370 }
2371 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2372 double a = RFLOAT_VALUE(recv);
2373 double b = RFLOAT_VALUE(obj);
2374
2375#if MSC_VERSION_BEFORE(1300)
2376 if (isnan(a)) {
2377 return Qfalse;
2378 }
2379 else if (isnan(b)) {
2380 return Qfalse;
2381 }
2382 else
2383#endif
2384 return RBOOL(a == b);
2385 }
2386 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2387 if (recv == obj) {
2388 return Qtrue;
2389 }
2390 else if (RB_TYPE_P(obj, T_STRING)) {
2391 return rb_str_eql_internal(obj, recv);
2392 }
2393 }
2394 return Qundef;
2395
2396 compare_by_identity:
2397 return RBOOL(recv == obj);
2398}
2399
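// opt_equality_specialized returns Qundef (not a Ruby value) when it cannot
// decide without a method call, e.g. when the receiver's class has redefined
// ==; callers must treat Qundef as "fall back to normal dispatch".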
2400static VALUE
2401opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2402{
2403 VM_ASSERT(cd_owner != NULL);
2404
2405 VALUE val = opt_equality_specialized(recv, obj);
2406 if (!UNDEF_P(val)) return val;
2407
2408 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2409 return Qundef;
2410 }
2411 else {
2412 return RBOOL(recv == obj);
2413 }
2414}
2415
2416#undef EQ_UNREDEFINED_P
2417
2418static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2419NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2420
2421static VALUE
2422opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2423{
2424 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2425
2426 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2427 return RBOOL(recv == obj);
2428 }
2429 else {
2430 return Qundef;
2431 }
2432}
2433
2434static VALUE
2435opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2436{
2437 VALUE val = opt_equality_specialized(recv, obj);
2438 if (!UNDEF_P(val)) {
2439 return val;
2440 }
2441 else {
2442 return opt_equality_by_mid_slowpath(recv, obj, mid);
2443 }
2444}
2445
2446VALUE
2447rb_equal_opt(VALUE obj1, VALUE obj2)
2448{
2449 return opt_equality_by_mid(obj1, obj2, idEq);
2450}
2451
2452VALUE
2453rb_eql_opt(VALUE obj1, VALUE obj2)
2454{
2455 return opt_equality_by_mid(obj1, obj2, idEqlP);
2456}
2457
2458extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2459extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2460
2461static VALUE
2462check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2463{
2464 switch (type) {
2465 case VM_CHECKMATCH_TYPE_WHEN:
2466 return pattern;
2467 case VM_CHECKMATCH_TYPE_RESCUE:
2468 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2469 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2470 }
2471 /* fall through */
2472 case VM_CHECKMATCH_TYPE_CASE: {
2473 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2474 }
2475 default:
2476 rb_bug("check_match: unreachable");
2477 }
2478}
2479
2480
2481#if MSC_VERSION_BEFORE(1300)
2482#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2483#else
2484#define CHECK_CMP_NAN(a, b) /* do nothing */
2485#endif
2486
2487static inline VALUE
2488double_cmp_lt(double a, double b)
2489{
2490 CHECK_CMP_NAN(a, b);
2491 return RBOOL(a < b);
2492}
2493
2494static inline VALUE
2495double_cmp_le(double a, double b)
2496{
2497 CHECK_CMP_NAN(a, b);
2498 return RBOOL(a <= b);
2499}
2500
2501static inline VALUE
2502double_cmp_gt(double a, double b)
2503{
2504 CHECK_CMP_NAN(a, b);
2505 return RBOOL(a > b);
2506}
2507
2508static inline VALUE
2509double_cmp_ge(double a, double b)
2510{
2511 CHECK_CMP_NAN(a, b);
2512 return RBOOL(a >= b);
2513}
2514
2515// This logic is duplicated in vm_dump.c.
2516static inline VALUE *
2517vm_base_ptr(const rb_control_frame_t *cfp)
2518{
2519 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2520
2521 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2522 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2523
2524 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2525 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2526 int params = ISEQ_BODY(cfp->iseq)->param.size;
2527
2528 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2529 bp += vm_ci_argc(ci);
2530 }
2531
2532 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2533 /* adjust `self' */
2534 bp += 1;
2535 }
2536#if VM_DEBUG_BP_CHECK
2537 if (bp != cfp->bp_check) {
2538 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2539 (long)(cfp->bp_check - GET_EC()->vm_stack),
2540 (long)(bp - GET_EC()->vm_stack));
2541 rb_bug("vm_base_ptr: unreachable");
2542 }
2543#endif
2544 return bp;
2545 }
2546 else {
2547 return NULL;
2548 }
2549}
2550
2551VALUE *
2552rb_vm_base_ptr(const rb_control_frame_t *cfp)
2553{
2554 return vm_base_ptr(cfp);
2555}
2556
2557/* method call processes with call_info */
2558
2559#include "vm_args.c"
2560
2561static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2562ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2563static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2564static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2565static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2566static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2567static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2568
2569static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2570
2571static VALUE
2572vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2573{
2574 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2575
2576 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2577}
2578
2579static VALUE
2580vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2581{
2582 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2583
2584 const struct rb_callcache *cc = calling->cc;
2585 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2586 int param = ISEQ_BODY(iseq)->param.size;
2587 int local = ISEQ_BODY(iseq)->local_table_size;
2588 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2589}
2590
2591bool
2592rb_simple_iseq_p(const rb_iseq_t *iseq)
2593{
2594 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2595 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2596 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2597 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2598 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2599 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2600 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2601 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2602}
2603
2604bool
2605rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2606{
2607 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2608 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2609 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2610 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2611 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2612 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2613 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2614 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2615}
2616
2617bool
2618rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2619{
2620 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2621 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2622 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2623 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2624 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2627}
2628
2629#define ALLOW_HEAP_ARGV (-2)
2630#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
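// These negative max_args values are sentinels rather than arities:
// ALLOW_HEAP_ARGV lets vm_caller_setup_arg_splat spill an oversized splat
// into a hidden heap array (calling->heap_argv), and
// ALLOW_HEAP_ARGV_KEEP_KWSPLAT additionally keeps kw_splat set when the
// keyword hash is appended to that array.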
2631
2632static inline bool
2633vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2634{
2635 vm_check_canary(GET_EC(), cfp->sp);
2636 bool ret = false;
2637
2638 if (!NIL_P(ary)) {
2639 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2640 long len = RARRAY_LEN(ary);
2641 int argc = calling->argc;
2642
2643 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2644 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2645 * a temporary array, instead of trying to keep arguments on the VM stack.
2646 */
2647 VALUE *argv = cfp->sp - argc;
2648 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2649 rb_ary_cat(argv_ary, argv, argc);
2650 rb_ary_cat(argv_ary, ptr, len);
2651 cfp->sp -= argc - 1;
2652 cfp->sp[-1] = argv_ary;
2653 calling->argc = 1;
2654 calling->heap_argv = argv_ary;
2655 RB_GC_GUARD(ary);
2656 }
2657 else {
2658 long i;
2659
2660 if (max_args >= 0 && len + argc > max_args) {
2661 /* If only a given max_args is allowed, copy up to max args.
2662 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2663 * where additional arguments are ignored.
2664 *
2665 * Also, copy up to one more argument than the maximum,
2666 * in case it is an empty keyword hash that will be removed.
2667 */
2668 calling->argc += len - (max_args - argc + 1);
2669 len = max_args - argc + 1;
2670 ret = true;
2671 }
2672 else {
2673 /* Unset heap_argv if set originally. Can happen when
2674 * forwarding modified arguments, where heap_argv was used
2675 * originally, but heap_argv is not supported by the forwarded
2676 * method in all cases.
2677 */
2678 calling->heap_argv = 0;
2679 }
2680 CHECK_VM_STACK_OVERFLOW(cfp, len);
2681
2682 for (i = 0; i < len; i++) {
2683 *cfp->sp++ = ptr[i];
2684 }
2685 calling->argc += i;
2686 }
2687 }
2688
2689 return ret;
2690}
2691
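// Collapse the kw_len keyword values at the stack top into a single Hash
// keyed by the CI's keyword symbols; e.g. f(k1: 1, k2: 2) leaves {k1: 1,
// k2: 2} as the last positional argument and sets calling->kw_splat.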
2692static inline void
2693vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2694{
2695 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2696 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2697 const VALUE h = rb_hash_new_with_size(kw_len);
2698 VALUE *sp = cfp->sp;
2699 int i;
2700
2701 for (i=0; i<kw_len; i++) {
2702 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2703 }
2704 (sp-kw_len)[0] = h;
2705
2706 cfp->sp -= kw_len - 1;
2707 calling->argc -= kw_len - 1;
2708 calling->kw_splat = 1;
2709}
2710
2711static inline VALUE
2712vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2713{
2714 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2715 if (keyword_hash != Qnil) {
2716 /* Convert a non-hash keyword splat to a new hash */
2717 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2718 }
2719 }
2720 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2721 /* Convert a hash keyword splat to a new hash unless
2722 * a mutable keyword splat was passed.
2723 * Skip allocating new hash for empty keyword splat, as empty
2724 * keyword splat will be ignored by both callers.
2725 */
2726 keyword_hash = rb_hash_dup(keyword_hash);
2727 }
2728 return keyword_hash;
2729}
2730
2731static inline void
2732CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2733 struct rb_calling_info *restrict calling,
2734 const struct rb_callinfo *restrict ci, int max_args)
2735{
2736 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2737 if (IS_ARGS_KW_SPLAT(ci)) {
2738 // f(*a, **kw)
2739 VM_ASSERT(calling->kw_splat == 1);
2740
2741 cfp->sp -= 2;
2742 calling->argc -= 2;
2743 VALUE ary = cfp->sp[0];
2744 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2745
2746 // splat a
2747 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2748
2749 // put kw
2750 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2751 if (UNLIKELY(calling->heap_argv)) {
2752 rb_ary_push(calling->heap_argv, kwh);
2753 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2754 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2755 calling->kw_splat = 0;
2756 }
2757 }
2758 else {
2759 cfp->sp[0] = kwh;
2760 cfp->sp++;
2761 calling->argc++;
2762
2763 VM_ASSERT(calling->kw_splat == 1);
2764 }
2765 }
2766 else {
2767 calling->kw_splat = 0;
2768 }
2769 }
2770 else {
2771 // f(*a)
2772 VM_ASSERT(calling->kw_splat == 0);
2773
2774 cfp->sp -= 1;
2775 calling->argc -= 1;
2776 VALUE ary = cfp->sp[0];
2777
2778 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2779 goto check_keyword;
2780 }
2781
2782 // check the last argument
2783 VALUE last_hash, argv_ary;
2784 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2785 if (!IS_ARGS_KEYWORD(ci) &&
2786 RARRAY_LEN(argv_ary) > 0 &&
2787 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2788 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2789
2790 rb_ary_pop(argv_ary);
2791 if (!RHASH_EMPTY_P(last_hash)) {
2792 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2793 calling->kw_splat = 1;
2794 }
2795 }
2796 }
2797 else {
2798check_keyword:
2799 if (!IS_ARGS_KEYWORD(ci) &&
2800 calling->argc > 0 &&
2801 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2802 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2803
2804 if (RHASH_EMPTY_P(last_hash)) {
2805 calling->argc--;
2806 cfp->sp -= 1;
2807 }
2808 else {
2809 cfp->sp[-1] = rb_hash_dup(last_hash);
2810 calling->kw_splat = 1;
2811 }
2812 }
2813 }
2814 }
2815 }
2816 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2817 // f(**kw)
2818 VM_ASSERT(calling->kw_splat == 1);
2819 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2820
2821 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2822 cfp->sp--;
2823 calling->argc--;
2824 calling->kw_splat = 0;
2825 }
2826 else {
2827 cfp->sp[-1] = kwh;
2828 }
2829 }
2830 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2831 // f(k1:1, k2:2)
2832 VM_ASSERT(calling->kw_splat == 0);
2833
2834 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2835 * by creating a keyword hash.
2836 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2837 */
2838 vm_caller_setup_arg_kw(cfp, calling, ci);
2839 }
2840}
2841
2842#define USE_OPT_HIST 0
2843
2844#if USE_OPT_HIST
2845#define OPT_HIST_MAX 64
2846static int opt_hist[OPT_HIST_MAX+1];
2847
2848__attribute__((destructor))
2849static void
2850opt_hist_show_results_at_exit(void)
2851{
2852 for (int i=0; i<OPT_HIST_MAX; i++) {
2853 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2854 }
2855}
2856#endif
2857
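// Fast path for methods whose only parameter complexity is optional
// arguments. Illustrative example (m is hypothetical): for
// def m(a, b = 1, c = 2) called as m(10, 20), lead_num = 1 and argc = 2,
// so opt = 1 optional was supplied; execution starts at opt_table[1], which
// skips b's default and evaluates only c's, and delta = opt_num - opt = 1
// trims the unsupplied slot from the param size handed to the frame setup.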
2858static VALUE
2859vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2860 struct rb_calling_info *calling)
2861{
2862 const struct rb_callcache *cc = calling->cc;
2863 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2864 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2865 const int opt = calling->argc - lead_num;
2866 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2867 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2868 const int param = ISEQ_BODY(iseq)->param.size;
2869 const int local = ISEQ_BODY(iseq)->local_table_size;
2870 const int delta = opt_num - opt;
2871
2872 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2873
2874#if USE_OPT_HIST
2875 if (opt_pc < OPT_HIST_MAX) {
2876 opt_hist[opt]++;
2877 }
2878 else {
2879 opt_hist[OPT_HIST_MAX]++;
2880 }
2881#endif
2882
2883 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2884}
2885
2886static VALUE
2887vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2888 struct rb_calling_info *calling)
2889{
2890 const struct rb_callcache *cc = calling->cc;
2891 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2892 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2893 const int opt = calling->argc - lead_num;
2894 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2895
2896 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2897
2898#if USE_OPT_HIST
2899 if (opt_pc < OPT_HIST_MAX) {
2900 opt_hist[opt]++;
2901 }
2902 else {
2903 opt_hist[OPT_HIST_MAX]++;
2904 }
2905#endif
2906
2907 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2908}
2909
2910static void
2911args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2912 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2913 VALUE *const locals);
2914
2915static VALUE
2916vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2917 struct rb_calling_info *calling)
2918{
2919 const struct rb_callcache *cc = calling->cc;
2920 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2921 int param_size = ISEQ_BODY(iseq)->param.size;
2922 int local_size = ISEQ_BODY(iseq)->local_table_size;
2923
2924 // Setting up local size and param size
2925 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2926
2927 local_size = local_size + vm_ci_argc(calling->cd->ci);
2928 param_size = param_size + vm_ci_argc(calling->cd->ci);
2929
2930 cfp->sp[0] = (VALUE)calling->cd->ci;
2931
2932 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2933}
2934
2935static VALUE
2936vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2937 struct rb_calling_info *calling)
2938{
2939 const struct rb_callinfo *ci = calling->cd->ci;
2940 const struct rb_callcache *cc = calling->cc;
2941
2942 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2943 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2944
2945 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2946 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2947 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2948 const int ci_kw_len = kw_arg->keyword_len;
2949 const VALUE * const ci_keywords = kw_arg->keywords;
2950 VALUE *argv = cfp->sp - calling->argc;
2951 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2952 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2953 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2954 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2955 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2956
2957 int param = ISEQ_BODY(iseq)->param.size;
2958 int local = ISEQ_BODY(iseq)->local_table_size;
2959 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2960}
2961
2962static VALUE
2963vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2964 struct rb_calling_info *calling)
2965{
2966 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2967 const struct rb_callcache *cc = calling->cc;
2968
2969 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2970 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2971
2972 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2973 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2974 VALUE * const argv = cfp->sp - calling->argc;
2975 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
2976
2977 int i;
2978 for (i=0; i<kw_param->num; i++) {
2979 klocals[i] = kw_param->default_values[i];
2980 }
2981 klocals[i] = INT2FIX(0); // kw specify flag
2982 // NOTE:
2983 // nobody checks this value, but it should be cleared because it can
2984 // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
2985
2986 int param = ISEQ_BODY(iseq)->param.size;
2987 int local = ISEQ_BODY(iseq)->local_table_size;
2988 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2989}
2990
2991static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
2992
2993static VALUE
2994vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2995 struct rb_calling_info *calling)
2996{
2997 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
2998 cfp->sp -= (calling->argc + 1);
2999 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3000 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3001}
3002
3003VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3004
3005static void
3006warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3007{
3008 rb_vm_t *vm = GET_VM();
3009 set_table *dup_check_table = vm->unused_block_warning_table;
3010 st_data_t key;
3011 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3012
3013 union {
3014 VALUE v;
3015 unsigned char b[SIZEOF_VALUE];
3016 } k1 = {
3017 .v = (VALUE)pc,
3018 }, k2 = {
3019 .v = (VALUE)cme->def,
3020 };
3021
3022 // relaxed check
3023 if (!strict_unused_block) {
3024 key = (st_data_t)cme->def->original_id;
3025
3026 if (set_lookup(dup_check_table, key)) {
3027 return;
3028 }
3029 }
3030
3031 // strict check
3032 // make unique key from pc and me->def pointer
3033 key = 0;
3034 for (int i=0; i<SIZEOF_VALUE; i++) {
3035 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3036 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3037 }
3038
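// The loop above XORs pc's bytes in ascending order against def's bytes in
// descending order, spreading both pointers across the whole key so that
// nearby addresses are unlikely to collide in the dup_check_table.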
3039 if (0) {
3040 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3041 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3042 fprintf(stderr, "key:%p\n", (void *)key);
3043 }
3044
3045 // duplication check
3046 if (set_insert(dup_check_table, key)) {
3047 // already shown
3048 }
3049 else if (RTEST(ruby_verbose) || strict_unused_block) {
3050 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3051 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3052
3053 if (!NIL_P(m_loc)) {
3054 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3055 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3056 }
3057 else {
3058 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3059 }
3060 }
3061}
3062
3063static inline int
3064vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3065 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3066{
3067 const struct rb_callinfo *ci = calling->cd->ci;
3068 const struct rb_callcache *cc = calling->cc;
3069
3070 VM_ASSERT((vm_ci_argc(ci), 1));
3071 VM_ASSERT(vm_cc_cme(cc) != NULL);
3072
3073 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3074 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3075 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3076 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3077 }
3078
3079 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3080 if (LIKELY(rb_simple_iseq_p(iseq))) {
3081 rb_control_frame_t *cfp = ec->cfp;
3082 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3083 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3084
3085 if (calling->argc != lead_num) {
3086 argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3087 }
3088
3089 //VM_ASSERT(ci == calling->cd->ci);
3090 VM_ASSERT(cc == calling->cc);
3091
3092 if (vm_call_iseq_optimizable_p(ci, cc)) {
3093 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3094 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3095 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3096 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3097 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3098 }
3099 else {
3100 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3101 }
3102 }
3103 return 0;
3104 }
3105 else if (rb_iseq_only_optparam_p(iseq)) {
3106 rb_control_frame_t *cfp = ec->cfp;
3107
3108 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3109 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3110
3111 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3112 const int argc = calling->argc;
3113 const int opt = argc - lead_num;
3114
3115 if (opt < 0 || opt > opt_num) {
3116 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3117 }
3118
3119 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3120 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3121 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3122 vm_call_cacheable(ci, cc));
3123 }
3124 else {
3125 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3126 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3127 vm_call_cacheable(ci, cc));
3128 }
3129
3130 /* initialize opt vars for self-references */
3131 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3132 for (int i=argc; i<lead_num + opt_num; i++) {
3133 argv[i] = Qnil;
3134 }
3135 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3136 }
3137 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3138 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3139 const int argc = calling->argc;
3140 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3141
3142 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3143 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3144
3145 if (argc - kw_arg->keyword_len == lead_num) {
3146 const int ci_kw_len = kw_arg->keyword_len;
3147 const VALUE * const ci_keywords = kw_arg->keywords;
3148 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3149 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3150
3151 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3152 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3153
3154 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3155 vm_call_cacheable(ci, cc));
3156
3157 return 0;
3158 }
3159 }
3160 else if (argc == lead_num) {
3161 /* no kwarg */
3162 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3163 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3164
3165 if (klocals[kw_param->num] == INT2FIX(0)) {
3166 /* copy from default_values */
3167 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3168 vm_call_cacheable(ci, cc));
3169 }
3170
3171 return 0;
3172 }
3173 }
3174 }
3175
3176 // Called iseq is using ... param
3177 // def foo(...) # <- iseq for foo will have "forwardable"
3178 //
3179 // We want to set the `...` local to the caller's CI
3180 // foo(1, 2) # <- the ci for this should end up as `...`
3181 //
3182 // So hopefully the stack looks like:
3183 //
3184 // => 1
3185 // => 2
3186 // => *
3187 // => **
3188 // => &
3189 // => ... # <- points at `foo`s CI
3190 // => cref_or_me
3191 // => specval
3192 // => type
3193 //
3194 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3195 bool can_fastpath = true;
3196
3197 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3198 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3199 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3200 ci = vm_ci_new_runtime(
3201 vm_ci_mid(ci),
3202 vm_ci_flag(ci),
3203 vm_ci_argc(ci),
3204 vm_ci_kwarg(ci));
3205 }
3206 else {
3207 ci = forward_cd->caller_ci;
3208 }
3209 can_fastpath = false;
3210 }
3211 // C functions calling iseqs will stack-allocate a CI,
3212 // so we need to convert it to a heap-allocated one.
3213 if (!vm_ci_markable(ci)) {
3214 ci = vm_ci_new_runtime(
3215 vm_ci_mid(ci),
3216 vm_ci_flag(ci),
3217 vm_ci_argc(ci),
3218 vm_ci_kwarg(ci));
3219 can_fastpath = false;
3220 }
3221 argv[param_size - 1] = (VALUE)ci;
3222 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3223 return 0;
3224 }
3225
3226 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3227}
3228
3229static void
3230vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3231{
3232 // This case is when the caller is using a ... parameter.
3233 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3234 // In this case the caller's caller's CI will be on the stack.
3235 //
3236 // For example:
3237 //
3238 // def bar(a, b); a + b; end
3239 // def foo(...); bar(...); end
3240 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3241 //
3242 // Stack layout will be:
3243 //
3244 // > 1
3245 // > 2
3246 // > CI for foo(1, 2)
3247 // > cref_or_me
3248 // > specval
3249 // > type
3250 // > receiver
3251 // > CI for foo(1, 2), via `getlocal ...`
3252 // > ( SP points here )
3253 const VALUE * lep = VM_CF_LEP(cfp);
3254
3255 const rb_iseq_t *iseq;
3256
3257 // If we're in an escaped environment (lambda for example), get the iseq
3258 // from the captured env.
3259 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3260 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3261 iseq = env->iseq;
3262 }
3263 else { // Otherwise use the lep to find the caller
3264 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3265 }
3266
3267 // Our local storage is below the args we need to copy
3268 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3269
3270 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3271 VALUE * to = cfp->sp - 1; // clobber the CI
3272
3273 if (RTEST(splat)) {
3274 to -= 1; // clobber the splat array
3275 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3276 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3277 to += RARRAY_LEN(splat);
3278 }
3279
3280 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3281 MEMCPY(to, from, VALUE, argc);
3282 cfp->sp = to + argc;
3283
3284 // Stack layout should now be:
3285 //
3286 // > 1
3287 // > 2
3288 // > CI for foo(1, 2)
3289 // > cref_or_me
3290 // > specval
3291 // > type
3292 // > receiver
3293 // > 1
3294 // > 2
3295 // > ( SP points here )
3296}
3297
3298static VALUE
3299vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3300{
3301 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3302
3303 const struct rb_callcache *cc = calling->cc;
3304 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3305 int param_size = ISEQ_BODY(iseq)->param.size;
3306 int local_size = ISEQ_BODY(iseq)->local_table_size;
3307
3308 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3309
3310 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3311 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3312}
3313
3314static VALUE
3315vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3316{
3317 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3318
3319 const struct rb_callcache *cc = calling->cc;
3320 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3321 int param_size = ISEQ_BODY(iseq)->param.size;
3322 int local_size = ISEQ_BODY(iseq)->local_table_size;
3323
3324 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3325
3326 // Setting up local size and param size
3327 local_size = local_size + vm_ci_argc(calling->cd->ci);
3328 param_size = param_size + vm_ci_argc(calling->cd->ci);
3329
3330 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3331 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3332}
3333
3334static inline VALUE
3335vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3336 int opt_pc, int param_size, int local_size)
3337{
3338 const struct rb_callinfo *ci = calling->cd->ci;
3339 const struct rb_callcache *cc = calling->cc;
3340
3341 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3342 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3343 }
3344 else {
3345 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3346 }
3347}
3348
3349static inline VALUE
3350vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3351 int opt_pc, int param_size, int local_size)
3352{
3353 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3354 VALUE *argv = cfp->sp - calling->argc;
3355 VALUE *sp = argv + param_size;
3356 cfp->sp = argv - 1 /* recv */;
3357
3358 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3359 calling->block_handler, (VALUE)me,
3360 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3361 local_size - param_size,
3362 ISEQ_BODY(iseq)->stack_max);
3363 return Qundef;
3364}
3365
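// Tailcall setup pops the current frame first and pushes the callee's frame
// into the space it vacated (self, then the copied arguments), so the VM
// stack does not grow across tailcalls.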
3366static inline VALUE
3367vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3368{
3369 const struct rb_callcache *cc = calling->cc;
3370 unsigned int i;
3371 VALUE *argv = cfp->sp - calling->argc;
3372 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3373 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3374 VALUE *src_argv = argv;
3375 VALUE *sp_orig, *sp;
3376 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3377
3378 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3379 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3380 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3381 dst_captured->code.val = src_captured->code.val;
3382 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3383 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3384 }
3385 else {
3386 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3387 }
3388 }
3389
3390 vm_pop_frame(ec, cfp, cfp->ep);
3391 cfp = ec->cfp;
3392
3393 sp_orig = sp = cfp->sp;
3394
3395 /* push self */
3396 sp[0] = calling->recv;
3397 sp++;
3398
3399 /* copy arguments */
3400 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3401 *sp++ = src_argv[i];
3402 }
3403
3404 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3405 calling->recv, calling->block_handler, (VALUE)me,
3406 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3407 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3408 ISEQ_BODY(iseq)->stack_max);
3409
3410 cfp->sp = sp_orig;
3411
3412 return Qundef;
3413}
3414
3415static void
3416ractor_unsafe_check(void)
3417{
3418 if (!rb_ractor_main_p()) {
3419 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3420 }
3421}
3422
3423static VALUE
3424call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3425{
3426 ractor_unsafe_check();
3427 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3428 return (*f)(recv, rb_ary_new4(argc, argv));
3429}
3430
3431static VALUE
3432call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3433{
3434 ractor_unsafe_check();
3435 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3436 return (*f)(argc, argv, recv);
3437}
3438
3439static VALUE
3440call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3441{
3442 ractor_unsafe_check();
3443 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3444 return (*f)(recv);
3445}
3446
3447static VALUE
3448call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3449{
3450 ractor_unsafe_check();
3451 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3452 return (*f)(recv, argv[0]);
3453}
3454
3455static VALUE
3456call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3457{
3458 ractor_unsafe_check();
3459 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3460 return (*f)(recv, argv[0], argv[1]);
3461}
3462
3463static VALUE
3464call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3465{
3466 ractor_unsafe_check();
3467 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3468 return (*f)(recv, argv[0], argv[1], argv[2]);
3469}
3470
3471static VALUE
3472call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3473{
3474 ractor_unsafe_check();
3475 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3476 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3477}
3478
3479static VALUE
3480call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3481{
3482 ractor_unsafe_check();
3483 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3484 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3485}
3486
3487static VALUE
3488call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3489{
3490 ractor_unsafe_check();
3491 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3492 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3493}
3494
3495static VALUE
3496call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3497{
3498 ractor_unsafe_check();
3499 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3500 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3501}
3502
3503static VALUE
3504call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3505{
3506 ractor_unsafe_check();
3507 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3508 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3509}
3510
3511static VALUE
3512call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3513{
3514 ractor_unsafe_check();
3515 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3516 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3517}
3518
3519static VALUE
3520call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3521{
3522 ractor_unsafe_check();
3523 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3524 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3525}
3526
3527static VALUE
3528call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3529{
3530 ractor_unsafe_check();
3531 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3532 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3533}
3534
3535static VALUE
3536call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3537{
3538 ractor_unsafe_check();
3539 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3540 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3541}
3542
3543static VALUE
3544call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3545{
3546 ractor_unsafe_check();
3547 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3548 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3549}
3550
3551static VALUE
3552call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3553{
3554 ractor_unsafe_check();
3555 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3556 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3557}
3558
3559static VALUE
3560call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3561{
3562 ractor_unsafe_check();
3563 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3564 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3565}
3566
3567static VALUE
3568ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3569{
3570 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3571 return (*f)(recv, rb_ary_new4(argc, argv));
3572}
3573
3574static VALUE
3575ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3576{
3577 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3578 return (*f)(argc, argv, recv);
3579}
3580
3581static VALUE
3582ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3583{
3584 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3585 return (*f)(recv);
3586}
3587
3588static VALUE
3589ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3590{
3591 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3592 return (*f)(recv, argv[0]);
3593}
3594
3595static VALUE
3596ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3597{
3598 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3599 return (*f)(recv, argv[0], argv[1]);
3600}
3601
3602static VALUE
3603ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3604{
3605 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3606 return (*f)(recv, argv[0], argv[1], argv[2]);
3607}
3608
3609static VALUE
3610ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3611{
3612 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3613 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3614}
3615
3616static VALUE
3617ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3618{
3619 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3620 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3621}
3622
3623static VALUE
3624ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3625{
3626 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3627 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3628}
3629
3630static VALUE
3631ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3632{
3633 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3634 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3635}
3636
3637static VALUE
3638ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3639{
3640 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3641 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3642}
3643
3644static VALUE
3645ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3646{
3647 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3648 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3649}
3650
3651static VALUE
3652ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3653{
3654 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3655 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3656}
3657
3658static VALUE
3659ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3660{
3661 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3662 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3663}
3664
3665static VALUE
3666ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3667{
3668 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3669 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3670}
3671
3672static VALUE
3673ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3674{
3675 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3676 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3677}
3678
3679static VALUE
3680ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3681{
3682 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3683 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3684}
3685
3686static VALUE
3687ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3688{
3689 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3690 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3691}
3692
3693static inline int
3694vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3695{
3696 const int ov_flags = RAISED_STACKOVERFLOW;
3697 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3698 if (rb_ec_raised_p(ec, ov_flags)) {
3699 rb_ec_raised_reset(ec, ov_flags);
3700 return TRUE;
3701 }
3702 return FALSE;
3703}
3704
3705#define CHECK_CFP_CONSISTENCY(func) \
3706 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3707 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3708
3709static inline
3710const rb_method_cfunc_t *
3711vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3712{
3713#if VM_DEBUG_VERIFY_METHOD_CACHE
3714 switch (me->def->type) {
3715 case VM_METHOD_TYPE_CFUNC:
3716 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3717 break;
3718# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3719 METHOD_BUG(ISEQ);
3720 METHOD_BUG(ATTRSET);
3721 METHOD_BUG(IVAR);
3722 METHOD_BUG(BMETHOD);
3723 METHOD_BUG(ZSUPER);
3724 METHOD_BUG(UNDEF);
3725 METHOD_BUG(OPTIMIZED);
3726 METHOD_BUG(MISSING);
3727 METHOD_BUG(REFINED);
3728 METHOD_BUG(ALIAS);
3729# undef METHOD_BUG
3730 default:
3731 rb_bug("wrong method type: %d", me->def->type);
3732 }
3733#endif
3734 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3735}
3736
3737static VALUE
3738vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3739 int argc, VALUE *argv, VALUE *stack_bottom)
3740{
3741 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3742 const struct rb_callinfo *ci = calling->cd->ci;
3743 const struct rb_callcache *cc = calling->cc;
3744 VALUE val;
3745 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3746 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3747
3748 VALUE recv = calling->recv;
3749 VALUE block_handler = calling->block_handler;
3750 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3751
3752 if (UNLIKELY(calling->kw_splat)) {
3753 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3754 }
3755
3756 VM_ASSERT(reg_cfp == ec->cfp);
3757
3758 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3759 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3760
3761 vm_push_frame(ec, NULL, frame_type, recv,
3762 block_handler, (VALUE)me,
3763 0, ec->cfp->sp, 0, 0);
3764
3765 int len = cfunc->argc;
3766 if (len >= 0) rb_check_arity(argc, len, len);
3767
3768 reg_cfp->sp = stack_bottom;
3769 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3770
3771 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3772
3773 rb_vm_pop_frame(ec);
3774
3775 VM_ASSERT(ec->cfp->sp == stack_bottom);
3776
3777 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3778 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3779
3780 return val;
3781}
3782
3783// Push a C method frame for a given cme. This is called when JIT code skipped
3784// pushing a frame but the C method reached a point where a frame is needed.
3785void
3786rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3787{
3788 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3789 rb_execution_context_t *ec = GET_EC();
3790 VALUE *sp = ec->cfp->sp;
3791 VALUE recv = *(sp - recv_idx - 1);
3792 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3793 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3794#if VM_CHECK_MODE > 0
3795 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3796 *(GET_EC()->cfp->sp) = Qfalse;
3797#endif
3798 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3799}
3800
3801// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3802bool
3803rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3804{
3805 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3806}
3807
3808static VALUE
3809vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3810{
3811 int argc = calling->argc;
3812 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3813 VALUE *argv = &stack_bottom[1];
3814
3815 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3816}
3817
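/* Generic cfunc path for calls with splat/keyword arguments. Long argument
 * lists may arrive in a hidden heap array; simple argument shapes fall
 * through to vm_call_cfunc_with_frame and install it as the fastpath. */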
3818static VALUE
3819vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3820{
3821 const struct rb_callinfo *ci = calling->cd->ci;
3822 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3823
3824 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3825 VALUE argv_ary;
3826 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3827 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3828 int argc = RARRAY_LENINT(argv_ary);
3829 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3830 VALUE *stack_bottom = reg_cfp->sp - 2;
3831
3832 VM_ASSERT(calling->argc == 1);
3833 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3834 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3835
3836 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3837 }
3838 else {
3839 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3840
3841 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3842 }
3843}
3844
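/* Spreads a splatted array back onto the VM stack so the cfunc sees a
 * contiguous argv (e.g. f(*ary) with a short ary); overly long arrays fall
 * back to vm_call_cfunc_other. */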
3845static inline VALUE
3846vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3847{
3848 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3849 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3850
3851 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3852 return vm_call_cfunc_other(ec, reg_cfp, calling);
3853 }
3854
3855 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3856 calling->kw_splat = 0;
3857 int i;
3858 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3859 VALUE *sp = stack_bottom;
3860 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3861 for (i = 0; i < argc; i++) {
3862 *++sp = argv[i];
3863 }
3864 reg_cfp->sp = sp+1;
3865
3866 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3867}
3868
3869static inline VALUE
3870vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3871{
3872 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3873 VALUE argv_ary = reg_cfp->sp[-1];
3874 int argc = RARRAY_LENINT(argv_ary);
3875 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3876 VALUE last_hash;
3877 int argc_offset = 0;
3878
3879 if (UNLIKELY(argc > 0 &&
3880 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3881 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3882 if (!RHASH_EMPTY_P(last_hash)) {
3883 return vm_call_cfunc_other(ec, reg_cfp, calling);
3884 }
3885 argc_offset++;
3886 }
3887 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3888}
3889
3890static inline VALUE
3891vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3892{
3893 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3894 VALUE keyword_hash = reg_cfp->sp[-1];
3895
3896 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3897 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3898 }
3899
3900 return vm_call_cfunc_other(ec, reg_cfp, calling);
3901}
3902
3903static VALUE
3904vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3905{
3906 const struct rb_callinfo *ci = calling->cd->ci;
3907 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3908
3909 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3910 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3911 // f(*a)
3912 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3913 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3914 }
3915 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3916 // f(*a, **kw)
3917 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3918 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3919 }
3920 }
3921
3922 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3923 return vm_call_cfunc_other(ec, reg_cfp, calling);
3924}
3925
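/* attr_reader fastpath: pop the receiver and read the instance variable
 * through the inline cache without pushing a method frame. */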
3926static VALUE
3927vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3928{
3929 const struct rb_callcache *cc = calling->cc;
3930 RB_DEBUG_COUNTER_INC(ccf_ivar);
3931 cfp->sp -= 1;
3932 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3933 return ivar;
3934}
3935
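/* attr_writer fastpath: pop the value and receiver, then store the instance
 * variable using the shape and index cached in the call cache, falling back
 * to the generic and then slow-path setters when the cache does not apply. */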
3936static VALUE
3937vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3938{
3939 RB_DEBUG_COUNTER_INC(ccf_attrset);
3940 VALUE val = *(cfp->sp - 1);
3941 cfp->sp -= 2;
3942 attr_index_t index;
3943 shape_id_t dest_shape_id;
3944 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
3945 ID id = vm_cc_cme(cc)->def->body.attr.id;
3946 rb_check_frozen(obj);
3947 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3948 if (UNDEF_P(res)) {
3949 switch (BUILTIN_TYPE(obj)) {
3950 case T_OBJECT:
3951 case T_CLASS:
3952 case T_MODULE:
3953 break;
3954 default:
3955 {
3956 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3957 if (!UNDEF_P(res)) {
3958 return res;
3959 }
3960 }
3961 }
3962 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3963 }
3964 return res;
3965}
3966
3967static VALUE
3968vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3969{
3970 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3971}
3972
3973static inline VALUE
3974vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3975{
3976 rb_proc_t *proc;
3977 VALUE val;
3978 const struct rb_callcache *cc = calling->cc;
3979 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3980 VALUE procv = cme->def->body.bmethod.proc;
3981
3982 if (!RB_OBJ_SHAREABLE_P(procv) &&
3983 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
3984 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
3985 }
3986
3987 /* control block frame */
3988 GetProcPtr(procv, proc);
3989 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
3990
3991 return val;
3992}
3993
3994static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
3995
3996static VALUE
3997vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3998{
3999 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4000
4001 const struct rb_callcache *cc = calling->cc;
4002 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4003 VALUE procv = cme->def->body.bmethod.proc;
4004
4005 if (!RB_OBJ_SHAREABLE_P(procv) &&
4006 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4007 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4008 }
4009
4010 rb_proc_t *proc;
4011 GetProcPtr(procv, proc);
4012 const struct rb_block *block = &proc->block;
4013
4014 while (vm_block_type(block) == block_type_proc) {
4015 block = vm_proc_block(block->as.proc);
4016 }
4017 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4018
4019 const struct rb_captured_block *captured = &block->as.captured;
4020 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4021 VALUE * const argv = cfp->sp - calling->argc;
4022 const int arg_size = ISEQ_BODY(iseq)->param.size;
4023
4024 int opt_pc;
4025 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4026 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4027 }
4028 else {
4029 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4030 }
4031
4032 cfp->sp = argv - 1; // -1 for the receiver
4033
4034 vm_push_frame(ec, iseq,
4035 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4036 calling->recv,
4037 VM_GUARDED_PREV_EP(captured->ep),
4038 (VALUE)cme,
4039 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4040 argv + arg_size,
4041 ISEQ_BODY(iseq)->local_table_size - arg_size,
4042 ISEQ_BODY(iseq)->stack_max);
4043
4044 return Qundef;
4045}
4046
4047static VALUE
4048vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4049{
4050 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4051
4052 VALUE *argv;
4053 int argc;
4054 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4055 if (UNLIKELY(calling->heap_argv)) {
4056 argv = RARRAY_PTR(calling->heap_argv);
4057 cfp->sp -= 2;
4058 }
4059 else {
4060 argc = calling->argc;
4061 argv = ALLOCA_N(VALUE, argc);
4062 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4063 cfp->sp += - argc - 1;
4064 }
4065
4066 return vm_call_bmethod_body(ec, calling, argv);
4067}
4068
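/* bmethods are methods whose body is a Proc, e.g. define_method(:foo) { }.
 * Resolve the underlying block once and pin the matching handler (iseq or
 * non-iseq) as the fastpath. */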
4069static VALUE
4070vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4071{
4072 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4073
4074 const struct rb_callcache *cc = calling->cc;
4075 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4076 VALUE procv = cme->def->body.bmethod.proc;
4077 rb_proc_t *proc;
4078 GetProcPtr(procv, proc);
4079 const struct rb_block *block = &proc->block;
4080
4081 while (vm_block_type(block) == block_type_proc) {
4082 block = vm_proc_block(block->as.proc);
4083 }
4084 if (vm_block_type(block) == block_type_iseq) {
4085 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4086 return vm_call_iseq_bmethod(ec, cfp, calling);
4087 }
4088
4089 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4090 return vm_call_noniseq_bmethod(ec, cfp, calling);
4091}
4092
4093VALUE
4094rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4095{
4096 VALUE klass = current_class;
4097
4098 /* for a prepended module, start the search from the covering class (the origin) */
4099 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4100 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4101 klass = RBASIC_CLASS(klass);
4102 }
4103
4104 while (RTEST(klass)) {
4105 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4106 if (owner == target_owner) {
4107 return klass;
4108 }
4109 klass = RCLASS_SUPER(klass);
4110 }
4111
4112 return current_class; /* maybe module function */
4113}
4114
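/* Resolves the method entry behind an alias. An original entry without a
 * defined_class is complemented with one found via its owner, and the
 * result is cached back into the alias definition (or into a fresh
 * definition when the current one is shared). */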
4115static const rb_callable_method_entry_t *
4116aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4117{
4118 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4119 const rb_callable_method_entry_t *cme;
4120
4121 if (orig_me->defined_class == 0) {
4122 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4123 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4124 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4125
4126 if (me->def->reference_count == 1) {
4127 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4128 }
4129 else {
4130 rb_method_definition_t *def =
4131 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4132 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4133 }
4134 }
4135 else {
4136 cme = (const rb_callable_method_entry_t *)orig_me;
4137 }
4138
4139 VM_ASSERT(callable_method_entry_p(cme));
4140 return cme;
4141}
4142
4143const rb_callable_method_entry_t *
4144rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4145{
4146 return aliased_callable_method_entry(me);
4147}
4148
4149static VALUE
4150vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4151{
4152 calling->cc = &VM_CC_ON_STACK(Qundef,
4153 vm_call_general,
4154 {{0}},
4155 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4156
4157 return vm_call_method_each_type(ec, cfp, calling);
4158}
4159
4160static enum method_missing_reason
4161ci_missing_reason(const struct rb_callinfo *ci)
4162{
4163 enum method_missing_reason stat = MISSING_NOENTRY;
4164 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4165 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4166 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4167 return stat;
4168}
4169
4170static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4171
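/* Calls a method named by a Symbol or String, as used by send and
 * Symbol#to_proc. A name with no existing id is dispatched via
 * method_missing without interning it ([Feature #5112]). */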
4172static VALUE
4173vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4174 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4175{
4176 ASSUME(calling->argc >= 0);
4177
4178 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4179 int argc = calling->argc;
4180 VALUE recv = calling->recv;
4181 VALUE klass = CLASS_OF(recv);
4182 ID mid = rb_check_id(&symbol);
4183 flags |= VM_CALL_OPT_SEND;
4184
4185 if (UNLIKELY(! mid)) {
4186 mid = idMethodMissing;
4187 missing_reason = ci_missing_reason(ci);
4188 ec->method_missing_reason = missing_reason;
4189
4190 VALUE argv_ary;
4191 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4192 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4193 rb_ary_unshift(argv_ary, symbol);
4194
4195 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4196 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4197 VALUE exc = rb_make_no_method_exception(
4198 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4199
4200 rb_exc_raise(exc);
4201 }
4202 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4203 }
4204 else {
4205 /* E.g. when argc == 2
4206 *
4207 * | | | | TOPN
4208 * | | +------+
4209 * | | +---> | arg1 | 0
4210 * +------+ | +------+
4211 * | arg1 | -+ +-> | arg0 | 1
4212 * +------+ | +------+
4213 * | arg0 | ---+ | sym | 2
4214 * +------+ +------+
4215 * | recv | | recv | 3
4216 * --+------+--------+------+------
4217 */
4218 int i = argc;
4219 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4220 INC_SP(1);
4221 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4222 argc = ++calling->argc;
4223
4224 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4225 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4226 TOPN(i) = symbol;
4227 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4228 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4229 VALUE exc = rb_make_no_method_exception(
4230 rb_eNoMethodError, 0, recv, argc, argv, priv);
4231
4232 rb_exc_raise(exc);
4233 }
4234 else {
4235 TOPN(i) = rb_str_intern(symbol);
4236 }
4237 }
4238 }
4239
4240 struct rb_forwarding_call_data new_fcd = {
4241 .cd = {
4242 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4243 .cc = NULL,
4244 },
4245 .caller_ci = NULL,
4246 };
4247
4248 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4249 calling->cd = &new_fcd.cd;
4250 }
4251 else {
4252 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4253 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4254 new_fcd.caller_ci = caller_ci;
4255 calling->cd = (struct rb_call_data *)&new_fcd;
4256 }
4257 calling->cc = &VM_CC_ON_STACK(klass,
4258 vm_call_general,
4259 { .method_missing_reason = missing_reason },
4260 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4261
4262 if (flags & VM_CALL_FCALL) {
4263 return vm_call_method(ec, reg_cfp, calling);
4264 }
4265
4266 const struct rb_callcache *cc = calling->cc;
4267 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4268
4269 if (vm_cc_cme(cc) != NULL) {
4270 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4271 case METHOD_VISI_PUBLIC: /* likely */
4272 return vm_call_method_each_type(ec, reg_cfp, calling);
4273 case METHOD_VISI_PRIVATE:
4274 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4275 break;
4276 case METHOD_VISI_PROTECTED:
4277 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4278 break;
4279 default:
4280 VM_UNREACHABLE(vm_call_method);
4281 }
4282 return vm_call_method_missing(ec, reg_cfp, calling);
4283 }
4284
4285 return vm_call_method_nome(ec, reg_cfp, calling);
4286}
4287
4288static VALUE
4289vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4290{
4291 const struct rb_callinfo *ci = calling->cd->ci;
4292 int i;
4293 VALUE sym;
4294
4295 i = calling->argc - 1;
4296
4297 if (calling->argc == 0) {
4298 rb_raise(rb_eArgError, "no method name given");
4299 }
4300
4301 sym = TOPN(i);
4302 /* E.g. when i == 2
4303 *
4304 * | | | | TOPN
4305 * +------+ | |
4306 * | arg1 | ---+ | | 0
4307 * +------+ | +------+
4308 * | arg0 | -+ +-> | arg1 | 1
4309 * +------+ | +------+
4310 * | sym | +---> | arg0 | 2
4311 * +------+ +------+
4312 * | recv | | recv | 3
4313 * --+------+--------+------+------
4314 */
4315 /* shift arguments */
4316 if (i > 0) {
4317 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4318 }
4319 calling->argc -= 1;
4320 DEC_SP(1);
4321
4322 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4323}
4324
4325static VALUE
4326vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4327{
4328 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4329 const struct rb_callinfo *ci = calling->cd->ci;
4330 int flags = VM_CALL_FCALL;
4331 VALUE sym;
4332
4333 VALUE argv_ary;
4334 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4335 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4336 sym = rb_ary_shift(argv_ary);
4337 flags |= VM_CALL_ARGS_SPLAT;
4338 if (calling->kw_splat) {
4339 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4340 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4341 calling->kw_splat = 0;
4342 }
4343 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4344 }
4345
4346 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4347 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4348}
4349
4350static VALUE
4351vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4352{
4353 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4354 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4355}
4356
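/* Optimized send/__send__: strip the method-name argument and re-dispatch,
 * e.g. obj.send(:m, *args, **kw). Complex argument shapes (splat, kwsplat,
 * kwargs, forwarding) take the heap-argv path. */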
4357static VALUE
4358vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4359{
4360 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4361
4362 const struct rb_callinfo *ci = calling->cd->ci;
4363 int flags = vm_ci_flag(ci);
4364
4365 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4366 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4367 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4368 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4369 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4370 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4371 }
4372
4373 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4374 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4375}
4376
4377static VALUE
4378vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4379 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4380{
4381 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4382
4383 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4384 unsigned int argc, flag;
4385
4386 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4387 argc = ++calling->argc;
4388
4389 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4390 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4391 vm_check_canary(ec, reg_cfp->sp);
4392 if (argc > 1) {
4393 MEMMOVE(argv+1, argv, VALUE, argc-1);
4394 }
4395 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4396 INC_SP(1);
4397
4398 ec->method_missing_reason = reason;
4399
4400 struct rb_forwarding_call_data new_fcd = {
4401 .cd = {
4402 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4403 .cc = NULL,
4404 },
4405 .caller_ci = NULL,
4406 };
4407
4408 if (!(flag & VM_CALL_FORWARDING)) {
4409 calling->cd = &new_fcd.cd;
4410 }
4411 else {
4412 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4413 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4414 new_fcd.caller_ci = caller_ci;
4415 calling->cd = (struct rb_call_data *)&new_fcd;
4416 }
4417
4418 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4419 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4420 return vm_call_method(ec, reg_cfp, calling);
4421}
4422
4423static VALUE
4424vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4425{
4426 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4427}
4428
4429static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4430static VALUE
4431vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4432{
4433 klass = RCLASS_SUPER(klass);
4434
4435 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4436 if (cme == NULL) {
4437 return vm_call_method_nome(ec, cfp, calling);
4438 }
4439 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4440 cme->def->body.refined.orig_me) {
4441 cme = refined_method_callable_without_refinement(cme);
4442 }
4443
4444 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4445
4446 return vm_call_method_each_type(ec, cfp, calling);
4447}
4448
4449static inline VALUE
4450find_refinement(VALUE refinements, VALUE klass)
4451{
4452 if (NIL_P(refinements)) {
4453 return Qnil;
4454 }
4455 return rb_hash_lookup(refinements, klass);
4456}
4457
4458PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4459static rb_control_frame_t *
4460current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4461{
4462 rb_control_frame_t *top_cfp = cfp;
4463
4464 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4465 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4466
4467 do {
4468 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4469 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4470 /* TODO: orphan block */
4471 return top_cfp;
4472 }
4473 } while (cfp->iseq != local_iseq);
4474 }
4475 return cfp;
4476}
4477
4478static const rb_callable_method_entry_t *
4479refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4480{
4481 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4482 const rb_callable_method_entry_t *cme;
4483
4484 if (orig_me->defined_class == 0) {
4485 cme = NULL;
4486 rb_notimplement();
4487 }
4488 else {
4489 cme = (const rb_callable_method_entry_t *)orig_me;
4490 }
4491
4492 VM_ASSERT(callable_method_entry_p(cme));
4493
4494 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4495 cme = NULL;
4496 }
4497
4498 return cme;
4499}
4500
4501static const rb_callable_method_entry_t *
4502search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4503{
4504 ID mid = vm_ci_mid(calling->cd->ci);
4505 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4506 const struct rb_callcache * const cc = calling->cc;
4507 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4508
4509 for (; cref; cref = CREF_NEXT(cref)) {
4510 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4511 if (NIL_P(refinement)) continue;
4512
4513 const rb_callable_method_entry_t *const ref_me =
4514 rb_callable_method_entry(refinement, mid);
4515
4516 if (ref_me) {
4517 if (vm_cc_call(cc) == vm_call_super_method) {
4518 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4519 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4520 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4521 continue;
4522 }
4523 }
4524
4525 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4526 cme->def != ref_me->def) {
4527 cme = ref_me;
4528 }
4529 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4530 return cme;
4531 }
4532 }
4533 else {
4534 return NULL;
4535 }
4536 }
4537
4538 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4539 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4540 }
4541 else {
4542 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4543 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4544 return cme;
4545 }
4546}
4547
4548static VALUE
4549vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4550{
4551 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4552
4553 if (ref_cme) {
4554 if (calling->cd->cc) {
4555 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4556 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4557 return vm_call_method(ec, cfp, calling);
4558 }
4559 else {
4560 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4561 calling->cc = ref_cc;
4562 return vm_call_method(ec, cfp, calling);
4563 }
4564 }
4565 else {
4566 return vm_call_method_nome(ec, cfp, calling);
4567 }
4568}
4569
4570static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4571
4572NOINLINE(static VALUE
4573 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4574 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4575
4576static VALUE
4577vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4578 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4579{
4580 int argc = calling->argc;
4581
4582 /* remove self */
4583 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4584 DEC_SP(1);
4585
4586 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4587}
4588
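/* Optimized Proc#call: the receiver is the Proc itself, so the call is
 * rewritten as a block invocation, e.g. prc.call(1, 2) or prc.(1, 2). */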
4589static VALUE
4590vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4591{
4592 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4593
4594 const struct rb_callinfo *ci = calling->cd->ci;
4595 VALUE procval = calling->recv;
4596 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4597}
4598
4599static VALUE
4600vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4601{
4602 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4603
4604 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4605 const struct rb_callinfo *ci = calling->cd->ci;
4606
4607 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4608 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4609 }
4610 else {
4611 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4612 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4613 return vm_call_general(ec, reg_cfp, calling);
4614 }
4615}
4616
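/* Optimized Struct accessors generated by Struct.new(:a, ...): read or
 * write the member slot recorded in the method definition (e.g. s.a and
 * s.a = 1), with a frozen check only on the writer. */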
4617static VALUE
4618vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4619{
4620 VALUE recv = calling->recv;
4621
4622 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4623 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4624 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4625
4626 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4627 return internal_RSTRUCT_GET(recv, off);
4628}
4629
4630static VALUE
4631vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4632{
4633 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4634
4635 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4636 reg_cfp->sp -= 1;
4637 return ret;
4638}
4639
4640static VALUE
4641vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4642{
4643 VALUE recv = calling->recv;
4644
4645 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4646 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4647 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4648
4649 rb_check_frozen(recv);
4650
4651 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4652 internal_RSTRUCT_SET(recv, off, val);
4653
4654 return val;
4655}
4656
4657static VALUE
4658vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4659{
4660 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4661
4662 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4663 reg_cfp->sp -= 2;
4664 return ret;
4665}
4666
4667NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4668 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4669
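/* Wraps an attr-style call so that C_CALL/C_RETURN events fire while
 * tracing is enabled; otherwise `nohook` runs first (typically installing
 * the fastpath, which is skipped during tracing so events keep firing). */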
4670#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4671 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4672 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4673 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4674 var = func; \
4675 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4676 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4677 } \
4678 else { \
4679 nohook; \
4680 var = func; \
4681 }
4682
4683static VALUE
4684vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4685 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4686{
4687 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4688 case OPTIMIZED_METHOD_TYPE_SEND:
4689 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4690 return vm_call_opt_send(ec, cfp, calling);
4691 case OPTIMIZED_METHOD_TYPE_CALL:
4692 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4693 return vm_call_opt_call(ec, cfp, calling);
4694 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4695 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4696 return vm_call_opt_block_call(ec, cfp, calling);
4697 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4698 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4699 rb_check_arity(calling->argc, 0, 0);
4700
4701 VALUE v;
4702 VM_CALL_METHOD_ATTR(v,
4703 vm_call_opt_struct_aref(ec, cfp, calling),
4704 set_vm_cc_ivar(cc); \
4705 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4706 return v;
4707 }
4708 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4709 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4710 rb_check_arity(calling->argc, 1, 1);
4711
4712 VALUE v;
4713 VM_CALL_METHOD_ATTR(v,
4714 vm_call_opt_struct_aset(ec, cfp, calling),
4715 set_vm_cc_ivar(cc); \
4716 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4717 return v;
4718 }
4719 default:
4720 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4721 }
4722}
4723
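/* Central dispatch over the method definition type. Each arm installs the
 * matching handler as the call-cache fastpath, so later calls through the
 * same cache bypass this switch (except REFINED; see the note below). */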
4724static VALUE
4725vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4726{
4727 const struct rb_callinfo *ci = calling->cd->ci;
4728 const struct rb_callcache *cc = calling->cc;
4729 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4730 VALUE v;
4731
4732 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4733
4734 switch (cme->def->type) {
4735 case VM_METHOD_TYPE_ISEQ:
4736 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4737 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4738 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4739 }
4740 else {
4741 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4742 return vm_call_iseq_setup(ec, cfp, calling);
4743 }
4744
4745 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4746 case VM_METHOD_TYPE_CFUNC:
4747 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4748 return vm_call_cfunc(ec, cfp, calling);
4749
4750 case VM_METHOD_TYPE_ATTRSET:
4751 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4752
4753 rb_check_arity(calling->argc, 1, 1);
4754
4755 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4756
4757 if (vm_cc_markable(cc)) {
4758 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4759 VM_CALL_METHOD_ATTR(v,
4760 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4761 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4762 }
4763 else {
4764 cc = &((struct rb_callcache) {
4765 .flags = T_IMEMO |
4766 (imemo_callcache << FL_USHIFT) |
4767 VM_CALLCACHE_UNMARKABLE |
4768 VM_CALLCACHE_ON_STACK,
4769 .klass = cc->klass,
4770 .cme_ = cc->cme_,
4771 .call_ = cc->call_,
4772 .aux_ = {
4773 .attr = {
4774 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4775 }
4776 },
4777 });
4778
4779 VM_CALL_METHOD_ATTR(v,
4780 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4781 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4782 }
4783 return v;
4784
4785 case VM_METHOD_TYPE_IVAR:
4786 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4787 rb_check_arity(calling->argc, 0, 0);
4788 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4789 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4790 VM_CALL_METHOD_ATTR(v,
4791 vm_call_ivar(ec, cfp, calling),
4792 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4793 return v;
4794
4795 case VM_METHOD_TYPE_MISSING:
4796 vm_cc_method_missing_reason_set(cc, 0);
4797 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4798 return vm_call_method_missing(ec, cfp, calling);
4799
4800 case VM_METHOD_TYPE_BMETHOD:
4801 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4802 return vm_call_bmethod(ec, cfp, calling);
4803
4804 case VM_METHOD_TYPE_ALIAS:
4805 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4806 return vm_call_alias(ec, cfp, calling);
4807
4808 case VM_METHOD_TYPE_OPTIMIZED:
4809 return vm_call_optimized(ec, cfp, calling, ci, cc);
4810
4811 case VM_METHOD_TYPE_UNDEF:
4812 break;
4813
4814 case VM_METHOD_TYPE_ZSUPER:
4815 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4816
4817 case VM_METHOD_TYPE_REFINED:
4818 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4819 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4820 return vm_call_refined(ec, cfp, calling);
4821 }
4822
4823 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4824}
4825
4826NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4827
4828static VALUE
4829vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4830{
4831 /* method missing */
4832 const struct rb_callinfo *ci = calling->cd->ci;
4833 const int stat = ci_missing_reason(ci);
4834
4835 if (vm_ci_mid(ci) == idMethodMissing) {
4836 if (UNLIKELY(calling->heap_argv)) {
4837 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4838 }
4839 else {
4840 rb_control_frame_t *reg_cfp = cfp;
4841 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4842 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4843 }
4844 }
4845 else {
4846 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4847 }
4848}
4849
4850/* Protected method calls and super invocations need to check that the receiver
4851 * (self for super) inherits the module on which the method is defined.
4852 * In the case of refinements, it should consider the original class, not the
4853 * refinement.
4854 */
4855static VALUE
4856vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4857{
4858 VALUE defined_class = me->defined_class;
4859 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4860 return NIL_P(refined_class) ? defined_class : refined_class;
4861}
4862
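/* Slow-path entry for method dispatch. Visibility is applied first:
 * private calls require a receiver-less (FCALL) form and protected calls
 * require self to be kind_of? the defining class; everything else defers
 * to vm_call_method_each_type or method_missing. */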
4863static inline VALUE
4864vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4865{
4866 const struct rb_callinfo *ci = calling->cd->ci;
4867 const struct rb_callcache *cc = calling->cc;
4868
4869 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4870
4871 if (vm_cc_cme(cc) != NULL) {
4872 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4873 case METHOD_VISI_PUBLIC: /* likely */
4874 return vm_call_method_each_type(ec, cfp, calling);
4875
4876 case METHOD_VISI_PRIVATE:
4877 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4878 enum method_missing_reason stat = MISSING_PRIVATE;
4879 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4880
4881 vm_cc_method_missing_reason_set(cc, stat);
4882 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4883 return vm_call_method_missing(ec, cfp, calling);
4884 }
4885 return vm_call_method_each_type(ec, cfp, calling);
4886
4887 case METHOD_VISI_PROTECTED:
4888 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4889 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4890 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4891 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4892 return vm_call_method_missing(ec, cfp, calling);
4893 }
4894 else {
4895 /* caching method info to dummy cc */
4896 VM_ASSERT(vm_cc_cme(cc) != NULL);
4897 struct rb_callcache cc_on_stack = *cc;
4898 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4899 calling->cc = &cc_on_stack;
4900 return vm_call_method_each_type(ec, cfp, calling);
4901 }
4902 }
4903 return vm_call_method_each_type(ec, cfp, calling);
4904
4905 default:
4906 rb_bug("unreachable");
4907 }
4908 }
4909 else {
4910 return vm_call_method_nome(ec, cfp, calling);
4911 }
4912}
4913
4914static VALUE
4915vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4916{
4917 RB_DEBUG_COUNTER_INC(ccf_general);
4918 return vm_call_method(ec, reg_cfp, calling);
4919}
4920
4921void
4922rb_vm_cc_general(const struct rb_callcache *cc)
4923{
4924 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4925 VM_ASSERT(cc != vm_cc_empty());
4926
4927 *(vm_call_handler *)&cc->call_ = vm_call_general;
4928}
4929
4930static VALUE
4931vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4932{
4933 RB_DEBUG_COUNTER_INC(ccf_super_method);
4934
4935 // This check exists to make this function distinct from `vm_call_general`, because some compilers (MSVC, in our experience)
4936 // can merge identical functions so that both end up with the same address.
4937 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
4938 if (ec == NULL) rb_bug("unreachable");
4939
4940 /* this check is required to distinguish this function from the other call handlers. */
4941 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4942 return vm_call_method(ec, reg_cfp, calling);
4943}
4944
4945/* super */
4946
4947static inline VALUE
4948vm_search_normal_superclass(VALUE klass)
4949{
4950 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4951 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4952 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4953 klass = RBASIC(klass)->klass;
4954 }
4955 klass = RCLASS_ORIGIN(klass);
4956 return RCLASS_SUPER(klass);
4957}
4958
4959NORETURN(static void vm_super_outside(void));
4960
4961static void
4962vm_super_outside(void)
4963{
4964 rb_raise(rb_eNoMethodError, "super called outside of method");
4965}
4966
4967static const struct rb_callcache *
4968empty_cc_for_super(void)
4969{
4970 return &vm_empty_cc_for_super;
4971}
4972
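/* Resolves the target of `super`: checks that the caller still has a method
 * context, rejects implicit-argument super inside define_method bodies,
 * rewrites the call info to the original method id, and looks the method up
 * from the superclass of the current definition's origin class. */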
4973static const struct rb_callcache *
4974vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
4975{
4976 VALUE current_defined_class;
4977 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
4978
4979 if (!me) {
4980 vm_super_outside();
4981 }
4982
4983 current_defined_class = vm_defined_class_for_protected_call(me);
4984
4985 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
4986 reg_cfp->iseq != method_entry_iseqptr(me) &&
4987 !rb_obj_is_kind_of(recv, current_defined_class)) {
4988 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
4989 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
4990
4991 if (m) { /* not bound UnboundMethod */
4992 rb_raise(rb_eTypeError,
4993 "self has wrong type to call super in this context: "
4994 "%"PRIsVALUE" (expected %"PRIsVALUE")",
4995 rb_obj_class(recv), m);
4996 }
4997 }
4998
4999 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5000 rb_raise(rb_eRuntimeError,
5001 "implicit argument passing of super from method defined"
5002 " by define_method() is not supported."
5003 " Specify all arguments explicitly.");
5004 }
5005
5006 ID mid = me->def->original_id;
5007
5008 if (!vm_ci_markable(cd->ci)) {
5009 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5010 }
5011 else {
5012 // update iseq. really? (TODO)
5013 cd->ci = vm_ci_new_runtime(mid,
5014 vm_ci_flag(cd->ci),
5015 vm_ci_argc(cd->ci),
5016 vm_ci_kwarg(cd->ci));
5017
5018 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5019 }
5020
5021 const struct rb_callcache *cc;
5022
5023 VALUE klass = vm_search_normal_superclass(me->defined_class);
5024
5025 if (!klass) {
5026 /* bound instance method of module */
5027 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5028 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5029 }
5030 else {
5031 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5032 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5033
5034 // with define_method, the cache can hold an entry for a different method id
5035 if (cached_cme == NULL) {
5036 // empty_cc_for_super is not a markable object
5037 cd->cc = empty_cc_for_super();
5038 }
5039 else if (cached_cme->called_id != mid) {
5040 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5041 if (cme) {
5042 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5043 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5044 }
5045 else {
5046 cd->cc = cc = empty_cc_for_super();
5047 }
5048 }
5049 else {
5050 switch (cached_cme->def->type) {
5051 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5052 case VM_METHOD_TYPE_REFINED:
5053 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5054 case VM_METHOD_TYPE_ATTRSET:
5055 case VM_METHOD_TYPE_IVAR:
5056 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5057 break;
5058 default:
5059 break; // use fastpath
5060 }
5061 }
5062 }
5063
5064 VM_ASSERT((vm_cc_cme(cc), true));
5065
5066 return cc;
5067}
5068
5069/* yield */
5070
5071static inline int
5072block_proc_is_lambda(const VALUE procval)
5073{
5074 rb_proc_t *proc;
5075
5076 if (procval) {
5077 GetProcPtr(procval, proc);
5078 return proc->is_lambda;
5079 }
5080 else {
5081 return 0;
5082 }
5083}
5084
5085static inline const rb_namespace_t *
5086block_proc_namespace(const VALUE procval)
5087{
5088 rb_proc_t *proc;
5089
5090 if (procval) {
5091 GetProcPtr(procval, proc);
5092 return proc->ns;
5093 }
5094 else {
5095 return NULL;
5096 }
5097}
5098
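/* Invokes a C-level (ifunc) block such as those made by rb_block_call.
 * Note that only the first argument is passed through `arg`; the full
 * list is still available to the ifunc via argc/argv. */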
5099static VALUE
5100vm_yield_with_cfunc(rb_execution_context_t *ec,
5101 const struct rb_captured_block *captured,
5102 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5103 const rb_callable_method_entry_t *me)
5104{
5105 int is_lambda = FALSE; /* TODO */
5106 VALUE val, arg, blockarg;
5107 int frame_flag;
5108 const struct vm_ifunc *ifunc = captured->code.ifunc;
5109
5110 if (is_lambda) {
5111 arg = rb_ary_new4(argc, argv);
5112 }
5113 else if (argc == 0) {
5114 arg = Qnil;
5115 }
5116 else {
5117 arg = argv[0];
5118 }
5119
5120 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5121
5122 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5123 if (kw_splat) {
5124 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5125 }
5126
5127 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5128 frame_flag,
5129 self,
5130 VM_GUARDED_PREV_EP(captured->ep),
5131 (VALUE)me,
5132 0, ec->cfp->sp, 0, 0);
5133 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5134 rb_vm_pop_frame(ec);
5135
5136 return val;
5137}
5138
5139VALUE
5140rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5141{
5142 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5143}
5144
5145static VALUE
5146vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5147{
5148 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5149}
5150
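/* Implicit block splat: a single Array argument is spread over the block's
 * leading parameters, e.g. [[1, 2]].each { |a, b| ... }. */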
5151static inline int
5152vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5153{
5154 int i;
5155 long len = RARRAY_LEN(ary);
5156
5157 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5158
5159 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5160 argv[i] = RARRAY_AREF(ary, i);
5161 }
5162
5163 return i;
5164}
5165
5166static inline VALUE
5167vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5168{
5169 VALUE ary, arg0 = argv[0];
5170 ary = rb_check_array_type(arg0);
5171#if 0
5172 argv[0] = arg0;
5173#else
5174 VM_ASSERT(argv[0] == arg0);
5175#endif
5176 return ary;
5177}
5178
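/* Block argument semantics for simple iseqs: for plain blocks, missing
 * arguments are filled with nil and extras are truncated (so
 * proc { |a, b| }.call(1) leaves b nil), while the lambda/method setup
 * keeps strict arity and raises instead. */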
5179static int
5180vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5181{
5182 if (rb_simple_iseq_p(iseq)) {
5183 rb_control_frame_t *cfp = ec->cfp;
5184 VALUE arg0;
5185
5186 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5187
5188 if (arg_setup_type == arg_setup_block &&
5189 calling->argc == 1 &&
5190 ISEQ_BODY(iseq)->param.flags.has_lead &&
5191 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5192 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5193 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5194 }
5195
5196 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5197 if (arg_setup_type == arg_setup_block) {
5198 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5199 int i;
5200 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5201 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5202 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5203 }
5204 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5205 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5206 }
5207 }
5208 else {
5209 argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5210 }
5211 }
5212
5213 return 0;
5214 }
5215 else {
5216 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5217 }
5218}
5219
5220static int
5221vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5222{
5223 struct rb_calling_info calling_entry, *calling;
5224
5225 calling = &calling_entry;
5226 calling->argc = argc;
5227 calling->block_handler = block_handler;
5228 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5229 calling->recv = Qundef;
5230 calling->heap_argv = 0;
5231 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5232
5233 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5234}
5235
5236/* ruby iseq -> ruby block */
5237
5238static VALUE
5239vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5240 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5241 bool is_lambda, VALUE block_handler)
5242{
5243 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5244 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5245 const int arg_size = ISEQ_BODY(iseq)->param.size;
5246 VALUE * const rsp = GET_SP() - calling->argc;
5247 VALUE * const argv = rsp;
5248 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5249 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5250
5251 SET_SP(rsp);
5252
5253 if (calling->proc_ns) {
5254 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5255 }
5256
5257 vm_push_frame(ec, iseq,
5258 frame_flag,
5259 captured->self,
5260 VM_GUARDED_PREV_EP(captured->ep), 0,
5261 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5262 rsp + arg_size,
5263 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5264
5265 return Qundef;
5266}
5267
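/* Symbol blocks (&:name): the first block argument becomes the receiver
 * and the rest become arguments, e.g. %w[a b].map(&:upcase). */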
5268static VALUE
5269vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5270 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5271 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5272{
5273 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5274 int flags = vm_ci_flag(ci);
5275
5276 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5277 ((calling->argc == 0) ||
5278 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5279 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5280 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5281 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5282 flags = 0;
5283 if (UNLIKELY(calling->heap_argv)) {
5284#if VM_ARGC_STACK_MAX < 0
5285 if (RARRAY_LEN(calling->heap_argv) < 1) {
5286 rb_raise(rb_eArgError, "no receiver given");
5287 }
5288#endif
5289 calling->recv = rb_ary_shift(calling->heap_argv);
5290 // Modify stack to avoid cfp consistency error
5291 reg_cfp->sp++;
5292 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5293 reg_cfp->sp[-2] = calling->recv;
5294 flags |= VM_CALL_ARGS_SPLAT;
5295 }
5296 else {
5297 if (calling->argc < 1) {
5298 rb_raise(rb_eArgError, "no receiver given");
5299 }
5300 calling->recv = TOPN(--calling->argc);
5301 }
5302 if (calling->kw_splat) {
5303 flags |= VM_CALL_KW_SPLAT;
5304 }
5305 }
5306 else {
5307 if (calling->argc < 1) {
5308 rb_raise(rb_eArgError, "no receiver given");
5309 }
5310 calling->recv = TOPN(--calling->argc);
5311 }
5312
5313 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5314}
5315
5316static VALUE
5317vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5318 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5319 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5320{
5321 VALUE val;
5322 int argc;
5323 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5324 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5325 argc = calling->argc;
5326 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5327 POPN(argc); /* TODO: should put before C/yield? */
5328 return val;
5329}
5330
5331static VALUE
5332vm_proc_to_block_handler(VALUE procval)
5333{
5334 const struct rb_block *block = vm_proc_block(procval);
5335
5336 switch (vm_block_type(block)) {
5337 case block_type_iseq:
5338 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5339 case block_type_ifunc:
5340 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5341 case block_type_symbol:
5342 return VM_BH_FROM_SYMBOL(block->as.symbol);
5343 case block_type_proc:
5344 return VM_BH_FROM_PROC(block->as.proc);
5345 }
5346 VM_UNREACHABLE(vm_yield_with_proc);
5347 return Qundef;
5348}
5349
5350static VALUE
5351vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5352 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5353 bool is_lambda, VALUE block_handler)
5354{
5355 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5356 VALUE proc = VM_BH_TO_PROC(block_handler);
5357 if (!calling->proc_ns) {
5358 calling->proc_ns = block_proc_namespace(proc);
5359 }
5360 is_lambda = block_proc_is_lambda(proc);
5361 block_handler = vm_proc_to_block_handler(proc);
5362 }
5363
5364 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5365}
5366
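/* Dispatch on the block handler type: iseq blocks, C-level ifunc blocks,
 * Proc objects (unwrapped above in vm_invoke_proc_block), and Symbols. */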
5367static inline VALUE
5368vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5369 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5370 bool is_lambda, VALUE block_handler)
5371{
5372 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5373 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5374 bool is_lambda, VALUE block_handler);
5375
5376 switch (vm_block_handler_type(block_handler)) {
5377 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5378 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5379 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5380 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5381 default: rb_bug("vm_invoke_block: unreachable");
5382 }
5383
5384 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5385}
5386
5387static VALUE
5388vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5389{
5390 const rb_execution_context_t *ec = GET_EC();
5391 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5392 struct rb_captured_block *captured;
5393
5394 if (cfp == 0) {
5395 rb_bug("vm_make_proc_with_iseq: unreachable");
5396 }
5397
5398 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5399 captured->code.iseq = blockiseq;
5400
5401 return rb_vm_make_proc(ec, captured, rb_cProc);
5402}
5403
5404static VALUE
5405vm_once_exec(VALUE iseq)
5406{
5407 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5408 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5409}
5410
5411static VALUE
5412vm_once_clear(VALUE data)
5413{
5414 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5415 is->once.running_thread = NULL;
5416 return Qnil;
5417}
5418
5419/* defined insn */
5420
5421static bool
5422check_respond_to_missing(VALUE obj, VALUE v)
5423{
5424 VALUE args[2];
5425 VALUE r;
5426
5427 args[0] = obj; args[1] = Qfalse;
5428 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5429 if (!UNDEF_P(r) && RTEST(r)) {
5430 return true;
5431 }
5432 else {
5433 return false;
5434 }
5435}
5436
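/* Implements the defined? keyword for each operand kind, e.g.
 * defined?(@ivar), defined?($gvar), defined?(Const), defined?(m()),
 * defined?(yield), and defined?(super). */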
5437static bool
5438vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5439{
5440 VALUE klass;
5441 enum defined_type type = (enum defined_type)op_type;
5442
5443 switch (type) {
5444 case DEFINED_IVAR:
5445 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5446 break;
5447 case DEFINED_GVAR:
5448 return rb_gvar_defined(SYM2ID(obj));
5449 break;
5450 case DEFINED_CVAR: {
5451 const rb_cref_t *cref = vm_get_cref(GET_EP());
5452 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5453 return rb_cvar_defined(klass, SYM2ID(obj));
5454 break;
5455 }
5456 case DEFINED_CONST:
5457 case DEFINED_CONST_FROM: {
5458 bool allow_nil = type == DEFINED_CONST;
5459 klass = v;
5460 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5461 break;
5462 }
5463 case DEFINED_FUNC:
5464 klass = CLASS_OF(v);
5465 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5466 break;
5467 case DEFINED_METHOD:{
5468 VALUE klass = CLASS_OF(v);
5469 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5470
5471 if (me) {
5472 switch (METHOD_ENTRY_VISI(me)) {
5473 case METHOD_VISI_PRIVATE:
5474 break;
5475 case METHOD_VISI_PROTECTED:
5476 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5477 break;
5478 }
5479 case METHOD_VISI_PUBLIC:
5480 return true;
5481 break;
5482 default:
5483 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5484 }
5485 }
5486 else {
5487 return check_respond_to_missing(obj, v);
5488 }
5489 break;
5490 }
5491 case DEFINED_YIELD:
5492 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5493 return true;
5494 }
5495 break;
5496 case DEFINED_ZSUPER:
5497 {
5498 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5499
5500 if (me) {
5501 VALUE klass = vm_search_normal_superclass(me->defined_class);
5502 if (!klass) return false;
5503
5504 ID id = me->def->original_id;
5505
5506 return rb_method_boundp(klass, id, 0);
5507 }
5508 }
5509 break;
5510 case DEFINED_REF:
5511 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5512 default:
5513 rb_bug("unimplemented defined? type (VM)");
5514 break;
5515 }
5516
5517 return false;
5518}
5519
5520bool
5521rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5522{
5523 return vm_defined(ec, reg_cfp, op_type, obj, v);
5524}
5525
5526static const VALUE *
5527vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5528{
5529 rb_num_t i;
5530 const VALUE *ep = reg_ep;
5531 for (i = 0; i < lv; i++) {
5532 ep = GET_PREV_EP(ep);
5533 }
5534 return ep;
5535}
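
/* Illustrative sketch (not part of the original source): vm_get_ep()
 * resolves `getlocal idx, lv` style operands by walking `lv` scope links
 * outward, e.g. for a local captured two scopes up:
 *
 *   const VALUE *ep = cfp->ep;    // innermost block environment
 *   ep = GET_PREV_EP(ep);         // enclosing block
 *   ep = GET_PREV_EP(ep);         // defining method frame
 *   VALUE local = *(ep - idx);    // locals live below the EP
 */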
5536
5537static VALUE
5538vm_get_special_object(const VALUE *const reg_ep,
5539 enum vm_special_object_type type)
5540{
5541 switch (type) {
5542 case VM_SPECIAL_OBJECT_VMCORE:
5543 return rb_mRubyVMFrozenCore;
5544 case VM_SPECIAL_OBJECT_CBASE:
5545 return vm_get_cbase(reg_ep);
5546 case VM_SPECIAL_OBJECT_CONST_BASE:
5547 return vm_get_const_base(reg_ep);
5548 default:
5549 rb_bug("putspecialobject insn: unknown value_type %d", type);
5550 }
5551}
5552
5553static VALUE
5554vm_concat_array(VALUE ary1, VALUE ary2st)
5555{
5556 const VALUE ary2 = ary2st;
5557 VALUE tmp1 = rb_check_to_array(ary1);
5558 VALUE tmp2 = rb_check_to_array(ary2);
5559
5560 if (NIL_P(tmp1)) {
5561 tmp1 = rb_ary_new3(1, ary1);
5562 }
5563 if (tmp1 == ary1) {
5564 tmp1 = rb_ary_dup(ary1);
5565 }
5566
5567 if (NIL_P(tmp2)) {
5568 return rb_ary_push(tmp1, ary2);
5569 }
5570 else {
5571 return rb_ary_concat(tmp1, tmp2);
5572 }
5573}
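
/* Worked example (illustrative): vm_concat_array() implements the
 * `concatarray` instruction behind literals like [*a, *b]:
 *
 *   a = [1], b = [2, 3]  ->  [1, 2, 3]   (concat of to_a results)
 *   a = [1], b = 2       ->  [1, 2]      (b not convertible; pushed)
 *   a = 1,   b = [2]     ->  [1, 2]      (a wrapped as [1] first)
 *
 * When to_a returns the receiver itself, ary1 is dup'ed so the source
 * array is never mutated.
 */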
5574
5575static VALUE
5576vm_concat_to_array(VALUE ary1, VALUE ary2st)
5577{
5578 /* ary1 must be a newly created array */
5579 const VALUE ary2 = ary2st;
5580
5581 if (NIL_P(ary2)) return ary1;
5582
5583 VALUE tmp2 = rb_check_to_array(ary2);
5584
5585 if (NIL_P(tmp2)) {
5586 return rb_ary_push(ary1, ary2);
5587 }
5588 else {
5589 return rb_ary_concat(ary1, tmp2);
5590 }
5591}
5592
5593// The YJIT implementation uses this C function
5594// and needs a non-static function to call
5595VALUE
5596rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5597{
5598 return vm_concat_array(ary1, ary2st);
5599}
5600
5601VALUE
5602rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5603{
5604 return vm_concat_to_array(ary1, ary2st);
5605}
5606
5607static VALUE
5608vm_splat_array(VALUE flag, VALUE ary)
5609{
5610 if (NIL_P(ary)) {
5611 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5612 }
5613 VALUE tmp = rb_check_to_array(ary);
5614 if (NIL_P(tmp)) {
5615 return rb_ary_new3(1, ary);
5616 }
5617 else if (RTEST(flag)) {
5618 return rb_ary_dup(tmp);
5619 }
5620 else {
5621 return tmp;
5622 }
5623}
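
/* Illustrative behavior of the `splatarray` instruction (the flag decides
 * whether the caller may mutate the result):
 *
 *   splatarray true,  nil     -> []                      (fresh array)
 *   splatarray false, nil     -> rb_cArray_empty_frozen  (shared)
 *   splatarray true,  [1, 2]  -> dup of [1, 2]
 *   splatarray false, [1, 2]  -> [1, 2] as-is
 *   splatarray true,  42      -> [42]                    (not convertible)
 */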
5624
5625// The YJIT implementation uses this C function
5626// and needs a non-static function to call
5627VALUE
5628rb_vm_splat_array(VALUE flag, VALUE ary)
5629{
5630 return vm_splat_array(flag, ary);
5631}
5632
5633static VALUE
5634vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5635{
5636 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5637
5638 if (flag & VM_CHECKMATCH_ARRAY) {
5639 long i;
5640 const long n = RARRAY_LEN(pattern);
5641
5642 for (i = 0; i < n; i++) {
5643 VALUE v = RARRAY_AREF(pattern, i);
5644 VALUE c = check_match(ec, v, target, type);
5645
5646 if (RTEST(c)) {
5647 return c;
5648 }
5649 }
5650 return Qfalse;
5651 }
5652 else {
5653 return check_match(ec, pattern, target, type);
5654 }
5655}
5656
5657VALUE
5658rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5659{
5660 return vm_check_match(ec, target, pattern, flag);
5661}
5662
5663static VALUE
5664vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5665{
5666 const VALUE kw_bits = *(ep - bits);
5667
5668 if (FIXNUM_P(kw_bits)) {
5669 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5670 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5671 return Qfalse;
5672 }
5673 else {
5674 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5675 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5676 }
5677 return Qtrue;
5678}
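
/* Worked example (illustrative): with kw_bits == INT2FIX(0b101),
 * keyword parameters 0 and 2 were supplied by the caller, so
 *
 *   vm_check_keyword(bits, 0, ep)  -> Qfalse  (given; skip the default)
 *   vm_check_keyword(bits, 1, ep)  -> Qtrue   (missing; run the default)
 *
 * Beyond KW_SPECIFIED_BITS_MAX keywords the bitmap is replaced by a
 * Hash keyed by keyword index, checked with rb_hash_has_key().
 */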
5679
5680static void
5681vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5682{
5683 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5684 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5685 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5686 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5687
5688 switch (flag) {
5689 case RUBY_EVENT_CALL:
5690 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5691 return;
5692 case RUBY_EVENT_C_CALL:
5693 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5694 return;
5695 case RUBY_EVENT_RETURN:
5696 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5697 return;
5698 case RUBY_EVENT_C_RETURN:
5699 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5700 return;
5701 }
5702 }
5703}
5704
5705static VALUE
5706vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5707{
5708 if (!rb_const_defined_at(cbase, id)) {
5709 return 0;
5710 }
5711 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5712 return rb_public_const_get_at(cbase, id);
5713 }
5714 else {
5715 return rb_const_get_at(cbase, id);
5716 }
5717}
5718
5719static VALUE
5720vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5721{
5722 if (!RB_TYPE_P(klass, T_CLASS)) {
5723 return 0;
5724 }
5725 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5726 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5727
5728 if (tmp != super) {
5729 rb_raise(rb_eTypeError,
5730 "superclass mismatch for class %"PRIsVALUE"",
5731 rb_id2str(id));
5732 }
5733 else {
5734 return klass;
5735 }
5736 }
5737 else {
5738 return klass;
5739 }
5740}
5741
5742static VALUE
5743vm_check_if_module(ID id, VALUE mod)
5744{
5745 if (!RB_TYPE_P(mod, T_MODULE)) {
5746 return 0;
5747 }
5748 else {
5749 return mod;
5750 }
5751}
5752
5753static VALUE
5754declare_under(ID id, VALUE cbase, VALUE c)
5755{
5756 rb_set_class_path_string(c, cbase, rb_id2str(id));
5757 rb_const_set(cbase, id, c);
5758 return c;
5759}
5760
5761static VALUE
5762vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5763{
5764 /* new class declaration */
5765 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5766 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5767 rb_define_alloc_func(c, rb_get_alloc_func(c));
5768 rb_class_inherited(s, c);
5769 return c;
5770}
5771
5772static VALUE
5773vm_declare_module(ID id, VALUE cbase)
5774{
5775 /* new module declaration */
5776 return declare_under(id, cbase, rb_module_new());
5777}
5778
5779NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5780static void
5781unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5782{
5783 VALUE name = rb_id2str(id);
5784 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5785 name, type);
5786 VALUE location = rb_const_source_location_at(cbase, id);
5787 if (!NIL_P(location)) {
5788 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5789 " previous definition of %"PRIsVALUE" was here",
5790 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5791 }
5792 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5793}
5794
5795static VALUE
5796vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5797{
5798 VALUE klass;
5799
5800 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5801 rb_raise(rb_eTypeError,
5802 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5803 rb_obj_class(super));
5804 }
5805
5806 vm_check_if_namespace(cbase);
5807
5808 /* find klass */
5809 rb_autoload_load(cbase, id);
5810
5811 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5812 if (!vm_check_if_class(id, flags, super, klass))
5813 unmatched_redefinition("class", cbase, id, klass);
5814 return klass;
5815 }
5816 else {
5817 return vm_declare_class(id, flags, cbase, super);
5818 }
5819}
5820
5821static VALUE
5822vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5823{
5824 VALUE mod;
5825
5826 vm_check_if_namespace(cbase);
5827 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5828 if (!vm_check_if_module(id, mod))
5829 unmatched_redefinition("module", cbase, id, mod);
5830 return mod;
5831 }
5832 else {
5833 return vm_declare_module(id, cbase);
5834 }
5835}
5836
5837static VALUE
5838vm_find_or_create_class_by_id(ID id,
5839 rb_num_t flags,
5840 VALUE cbase,
5841 VALUE super)
5842{
5843 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5844
5845 switch (type) {
5846 case VM_DEFINECLASS_TYPE_CLASS:
5847 /* classdef returns class scope value */
5848 return vm_define_class(id, flags, cbase, super);
5849
5850 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5851 /* classdef returns class scope value */
5852 return rb_singleton_class(cbase);
5853
5854 case VM_DEFINECLASS_TYPE_MODULE:
5855 /* classdef returns class scope value */
5856 return vm_define_module(id, flags, cbase);
5857
5858 default:
5859 rb_bug("unknown defineclass type: %d", (int)type);
5860 }
5861}
5862
5863static rb_method_visibility_t
5864vm_scope_visibility_get(const rb_execution_context_t *ec)
5865{
5866 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5867
5868 if (!vm_env_cref_by_cref(cfp->ep)) {
5869 return METHOD_VISI_PUBLIC;
5870 }
5871 else {
5872 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5873 }
5874}
5875
5876static int
5877vm_scope_module_func_check(const rb_execution_context_t *ec)
5878{
5879 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5880
5881 if (!vm_env_cref_by_cref(cfp->ep)) {
5882 return FALSE;
5883 }
5884 else {
5885 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5886 }
5887}
5888
5889static void
5890vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5891{
5892 VALUE klass;
5893 rb_method_visibility_t visi;
5894 rb_cref_t *cref = vm_ec_cref(ec);
5895
5896 if (is_singleton) {
5897 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5898 visi = METHOD_VISI_PUBLIC;
5899 }
5900 else {
5901 klass = CREF_CLASS_FOR_DEFINITION(cref);
5902 visi = vm_scope_visibility_get(ec);
5903 }
5904
5905 if (NIL_P(klass)) {
5906 rb_raise(rb_eTypeError, "no class/module to add method");
5907 }
5908
5909 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5910 // Set max_iv_count on classes based on the number of ivar sets in the initialize method
5911 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5912 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
5913 }
5914
5915 if (!is_singleton && vm_scope_module_func_check(ec)) {
5916 klass = rb_singleton_class(klass);
5917 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5918 }
5919}
5920
5921static VALUE
5922vm_invokeblock_i(struct rb_execution_context_struct *ec,
5923 struct rb_control_frame_struct *reg_cfp,
5924 struct rb_calling_info *calling)
5925{
5926 const struct rb_callinfo *ci = calling->cd->ci;
5927 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5928
5929 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5930 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5931 }
5932 else {
5933 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5934 }
5935}
5936
5937enum method_explorer_type {
5938 mexp_search_method,
5939 mexp_search_invokeblock,
5940 mexp_search_super,
5941};
5942
5943static inline VALUE
5944vm_sendish(
5945 struct rb_execution_context_struct *ec,
5946 struct rb_control_frame_struct *reg_cfp,
5947 struct rb_call_data *cd,
5948 VALUE block_handler,
5949 enum method_explorer_type method_explorer
5950) {
5951 VALUE val = Qundef;
5952 const struct rb_callinfo *ci = cd->ci;
5953 const struct rb_callcache *cc;
5954 int argc = vm_ci_argc(ci);
5955 VALUE recv = TOPN(argc);
5956 struct rb_calling_info calling = {
5957 .block_handler = block_handler,
5958 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5959 .recv = recv,
5960 .argc = argc,
5961 .cd = cd,
5962 };
5963
5964 switch (method_explorer) {
5965 case mexp_search_method:
5966 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5967 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5968 break;
5969 case mexp_search_super:
5970 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5971 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5972 break;
5973 case mexp_search_invokeblock:
5974 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5975 break;
5976 }
5977 return val;
5978}
5979
5980VALUE
5981rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5982{
5983 stack_check(ec);
5984
5985 struct rb_forwarding_call_data adjusted_cd;
5986 struct rb_callinfo adjusted_ci;
5987
5988 VALUE bh;
5989 VALUE val;
5990
5991 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5992 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5993
5994 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5995
5996 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
5997 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
5998 }
5999 }
6000 else {
6001 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6002 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6003 }
6004
6005 VM_EXEC(ec, val);
6006 return val;
6007}
6008
6009VALUE
6010rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6011{
6012 stack_check(ec);
6013 VALUE bh = VM_BLOCK_HANDLER_NONE;
6014 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6015 VM_EXEC(ec, val);
6016 return val;
6017}
6018
6019VALUE
6020rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6021{
6022 stack_check(ec);
6023 struct rb_forwarding_call_data adjusted_cd;
6024 struct rb_callinfo adjusted_ci;
6025
6026 VALUE bh;
6027 VALUE val;
6028
6029 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6030 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6031
6032 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6033
6034 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6035 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6036 }
6037 }
6038 else {
6039 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6040 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6041 }
6042
6043 VM_EXEC(ec, val);
6044 return val;
6045}
6046
6047VALUE
6048rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6049{
6050 stack_check(ec);
6051 VALUE bh = VM_BLOCK_HANDLER_NONE;
6052 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6053 VM_EXEC(ec, val);
6054 return val;
6055}
6056
6057/* object.c */
6058VALUE rb_nil_to_s(VALUE);
6059VALUE rb_true_to_s(VALUE);
6060VALUE rb_false_to_s(VALUE);
6061/* numeric.c */
6062VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6063VALUE rb_fix_to_s(VALUE);
6064/* variable.c */
6065VALUE rb_mod_to_s(VALUE);
6066VALUE rb_mod_name(VALUE);
6067
6068static VALUE
6069vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6070{
6071 int type = TYPE(recv);
6072 if (type == T_STRING) {
6073 return recv;
6074 }
6075
6076 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6077
6078 switch (type) {
6079 case T_SYMBOL:
6080 if (check_method_basic_definition(vm_cc_cme(cc))) {
6081 // rb_sym_to_s() allocates a mutable string, but since we are only
6082 // going to use this string for interpolation, it's fine to use the
6083 // frozen string.
6084 return rb_sym2str(recv);
6085 }
6086 break;
6087 case T_MODULE:
6088 case T_CLASS:
6089 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6090 // rb_mod_to_s() allocates a mutable string, but since we are only
6091 // going to use this string for interpolation, it's fine to use the
6092 // frozen string.
6093 VALUE val = rb_mod_name(recv);
6094 if (NIL_P(val)) {
6095 val = rb_mod_to_s(recv);
6096 }
6097 return val;
6098 }
6099 break;
6100 case T_NIL:
6101 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6102 return rb_nil_to_s(recv);
6103 }
6104 break;
6105 case T_TRUE:
6106 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6107 return rb_true_to_s(recv);
6108 }
6109 break;
6110 case T_FALSE:
6111 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6112 return rb_false_to_s(recv);
6113 }
6114 break;
6115 case T_FIXNUM:
6116 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6117 return rb_fix_to_s(recv);
6118 }
6119 break;
6120 }
6121 return Qundef;
6122}
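
/* Note (editorial): vm_objtostring() backs the `objtostring` instruction
 * emitted for string interpolation ("#{x}").  Each branch takes the
 * shortcut only when the receiver's to_s is known to still be the default
 * C implementation; returning Qundef makes the interpreter fall back to a
 * regular method call.
 */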
6123
6124static VALUE
6125vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6126{
6127 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6128 return ary;
6129 }
6130 else {
6131 return Qundef;
6132 }
6133}
6134
6135static VALUE
6136vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6137{
6138 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6139 return hash;
6140 }
6141 else {
6142 return Qundef;
6143 }
6144}
6145
6146static VALUE
6147vm_opt_str_freeze(VALUE str, int bop, ID id)
6148{
6149 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6150 return str;
6151 }
6152 else {
6153 return Qundef;
6154 }
6155}
6156
6157/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6158#define id_cmp idCmp
6159
6160static VALUE
6161vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6162{
6163 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6164 return rb_ary_includes(ary, target);
6165 }
6166 else {
6167 VALUE args[1] = {target};
6168
6169 // duparray
6170 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6171 VALUE dupary = rb_ary_resurrect(ary);
6172
6173 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6174 }
6175}
6176
6177VALUE
6178rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6179{
6180 return vm_opt_duparray_include_p(ec, ary, target);
6181}
6182
6183static VALUE
6184vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6185{
6186 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6187 if (num == 0) {
6188 return Qnil;
6189 }
6190 else {
6191 VALUE result = *ptr;
6192 rb_snum_t i = num - 1;
6193 while (i-- > 0) {
6194 const VALUE v = *++ptr;
6195 if (OPTIMIZED_CMP(v, result) > 0) {
6196 result = v;
6197 }
6198 }
6199 return result;
6200 }
6201 }
6202 else {
6203 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6204 }
6205}
6206
6207VALUE
6208rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6209{
6210 return vm_opt_newarray_max(ec, num, ptr);
6211}
6212
6213static VALUE
6214vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6215{
6216 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6217 if (num == 0) {
6218 return Qnil;
6219 }
6220 else {
6221 VALUE result = *ptr;
6222 rb_snum_t i = num - 1;
6223 while (i-- > 0) {
6224 const VALUE v = *++ptr;
6225 if (OPTIMIZED_CMP(v, result) < 0) {
6226 result = v;
6227 }
6228 }
6229 return result;
6230 }
6231 }
6232 else {
6233 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6234 }
6235}
6236
6237VALUE
6238rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6239{
6240 return vm_opt_newarray_min(ec, num, ptr);
6241}
6242
6243static VALUE
6244vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6245{
6246 // If Array#hash is _not_ monkeypatched, use the optimized call
6247 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6248 return rb_ary_hash_values(num, ptr);
6249 }
6250 else {
6251 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6252 }
6253}
6254
6255VALUE
6256rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6257{
6258 return vm_opt_newarray_hash(ec, num, ptr);
6259}
6260
6261VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6262VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6263
6264static VALUE
6265vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6266{
6267 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6268 struct RArray fake_ary;
6269 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6270 return rb_ary_includes(ary, target);
6271 }
6272 else {
6273 VALUE args[1] = {target};
6274 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6275 }
6276}
6277
6278VALUE
6279rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6280{
6281 return vm_opt_newarray_include_p(ec, num, ptr, target);
6282}
6283
6284static VALUE
6285vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6286{
6287 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6288 struct RArray fake_ary;
6289 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6290 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6291 }
6292 else {
6293 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6294 // Set up an array with room for the keyword hash.
6295 VALUE args[2];
6296 args[0] = fmt;
6297 int kw_splat = RB_NO_KEYWORDS;
6298 int argc = 1;
6299
6300 if (!UNDEF_P(buffer)) {
6301 args[1] = rb_hash_new_with_size(1);
6302 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6303 kw_splat = RB_PASS_KEYWORDS;
6304 argc++;
6305 }
6306
6307 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6308 }
6309}
6310
6311VALUE
6312rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6313{
6314 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6315}
6316
6317VALUE
6318rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6319{
6320 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6321}
6322
6323#undef id_cmp
6324
6325static void
6326vm_track_constant_cache(ID id, void *ic)
6327{
6328 rb_vm_t *vm = GET_VM();
6329 struct rb_id_table *const_cache = vm->constant_cache;
6330 VALUE lookup_result;
6331 set_table *ics;
6332
6333 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6334 ics = (set_table *)lookup_result;
6335 }
6336 else {
6337 ics = set_init_numtable();
6338 rb_id_table_insert(const_cache, id, (VALUE)ics);
6339 }
6340
6341 /* The call below to set_insert could allocate, which could trigger a GC.
6342 * If it triggers a GC, it may free an iseq that also holds a cache to this
6343 * constant. If that iseq is the last iseq with a cache to this constant, then
6344 * it will free this set_table, which would cause a use-after-free during this
6345 * set_insert.
6346 *
6347 * So to fix this issue, we store the ID that is currently being inserted
6348 * and, in remove_from_constant_cache, we don't free the ST table for ID
6349 * equal to this one.
6350 *
6351 * See [Bug #20921].
6352 */
6353 vm->inserting_constant_cache_id = id;
6354
6355 set_insert(ics, (st_data_t)ic);
6356
6357 vm->inserting_constant_cache_id = (ID)0;
6358}
6359
6360static void
6361vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6362{
6363 RB_VM_LOCKING() {
6364 for (int i = 0; segments[i]; i++) {
6365 ID id = segments[i];
6366 if (id == idNULL) continue;
6367 vm_track_constant_cache(id, ic);
6368 }
6369 }
6370}
6371
6372// For JIT inlining
6373static inline bool
6374vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6375{
6376 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6377 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6378
6379 return (ic_cref == NULL || // no need to check CREF
6380 ic_cref == vm_get_cref(reg_ep));
6381 }
6382 return false;
6383}
6384
6385static bool
6386vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6387{
6388 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6389 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6390}
6391
6392// YJIT needs this function to never allocate and never raise
6393bool
6394rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6395{
6396 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6397}
6398
6399static void
6400vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6401{
6402 if (ruby_vm_const_missing_count > 0) {
6403 ruby_vm_const_missing_count = 0;
6404 ic->entry = NULL;
6405 return;
6406 }
6407
6408 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6409 RB_OBJ_WRITE(ice, &ice->value, val);
6410 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6411 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6412 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6413
6414 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6415 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6416 rb_yjit_constant_ic_update(iseq, ic, pos);
6417}
6418
6419VALUE
6420rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6421{
6422 VALUE val;
6423 const ID *segments = ic->segments;
6424 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6425 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6426 val = ice->value;
6427
6428 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6429 }
6430 else {
6431 ruby_vm_constant_cache_misses++;
6432 val = vm_get_ev_const_chain(ec, segments);
6433 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6434 // Undo the PC increment to get the address of this instruction
6435 // INSN_ATTR(width) == 2
6436 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6437 }
6438 return val;
6439}
6440
6441static VALUE
6442vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6443{
6444 rb_thread_t *th = rb_ec_thread_ptr(ec);
6445 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6446
6447 again:
6448 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6449 return is->once.value;
6450 }
6451 else if (is->once.running_thread == NULL) {
6452 VALUE val;
6453 is->once.running_thread = th;
6454 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6455 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6456 /* is->once.running_thread is cleared by vm_once_clear() */
6457 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6458 return val;
6459 }
6460 else if (is->once.running_thread == th) {
6461 /* recursive once */
6462 return vm_once_exec((VALUE)iseq);
6463 }
6464 else {
6465 /* waiting for finish */
6466 RUBY_VM_CHECK_INTS(ec);
6467 rb_thread_schedule();
6468 goto again;
6469 }
6470}
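
/* Illustrative note: vm_once_dispatch() backs the `once` instruction,
 * most visibly the /o regexp flag (Ruby sketch; `Pattern.source` is a
 * hypothetical helper):
 *
 *   def scan(line)
 *     line =~ /#{Pattern.source}/o   # interpolation runs on the first call only
 *   end
 *
 * The first thread to arrive runs the body, concurrent threads spin on
 * RUBY_VM_CHECK_INTS()/rb_thread_schedule() until the value is published,
 * and re-entry by the running thread itself just executes the body again.
 */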
6471
6472static OFFSET
6473vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6474{
6475 switch (OBJ_BUILTIN_TYPE(key)) {
6476 case -1:
6477 case T_FLOAT:
6478 case T_SYMBOL:
6479 case T_BIGNUM:
6480 case T_STRING:
6481 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6482 SYMBOL_REDEFINED_OP_FLAG |
6483 INTEGER_REDEFINED_OP_FLAG |
6484 FLOAT_REDEFINED_OP_FLAG |
6485 NIL_REDEFINED_OP_FLAG |
6486 TRUE_REDEFINED_OP_FLAG |
6487 FALSE_REDEFINED_OP_FLAG |
6488 STRING_REDEFINED_OP_FLAG)) {
6489 st_data_t val;
6490 if (RB_FLOAT_TYPE_P(key)) {
6491 double kval = RFLOAT_VALUE(key);
6492 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6493 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6494 }
6495 }
6496 if (rb_hash_stlike_lookup(hash, key, &val)) {
6497 return FIX2LONG((VALUE)val);
6498 }
6499 else {
6500 return else_offset;
6501 }
6502 }
6503 }
6504 return 0;
6505}
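
/* Worked example (illustrative): for
 *
 *   case x; when 1 then ...; when "a" then ...; end
 *
 * the compiler emits a CDHASH {1 => offset1, "a" => offset2}.  A Float
 * x == 1.0 has a zero fractional part (modf returns 0.0), so it is
 * normalized to LONG2FIX(1) and still hits the `when 1` branch.  A
 * return value of 0 falls back to sequential === dispatch.
 */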
6506
6507NORETURN(static void
6508 vm_stack_consistency_error(const rb_execution_context_t *ec,
6509 const rb_control_frame_t *,
6510 const VALUE *));
6511static void
6512vm_stack_consistency_error(const rb_execution_context_t *ec,
6513 const rb_control_frame_t *cfp,
6514 const VALUE *bp)
6515{
6516 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6517 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6518 static const char stack_consistency_error[] =
6519 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6520#if defined RUBY_DEVEL
6521 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6522 rb_str_cat_cstr(mesg, "\n");
6523 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6524 rb_bug("%s", StringValueCStr(mesg));
6525#else
6526 rb_bug(stack_consistency_error, nsp, nbp);
6527#endif
6528}
6529
6530static VALUE
6531vm_opt_plus(VALUE recv, VALUE obj)
6532{
6533 if (FIXNUM_2_P(recv, obj) &&
6534 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6535 return rb_fix_plus_fix(recv, obj);
6536 }
6537 else if (FLONUM_2_P(recv, obj) &&
6538 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6539 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6540 }
6541 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6542 return Qundef;
6543 }
6544 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6545 RBASIC_CLASS(obj) == rb_cFloat &&
6546 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6547 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6548 }
6549 else if (RBASIC_CLASS(recv) == rb_cString &&
6550 RBASIC_CLASS(obj) == rb_cString &&
6551 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6552 return rb_str_opt_plus(recv, obj);
6553 }
6554 else if (RBASIC_CLASS(recv) == rb_cArray &&
6555 RBASIC_CLASS(obj) == rb_cArray &&
6556 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6557 return rb_ary_plus(recv, obj);
6558 }
6559 else {
6560 return Qundef;
6561 }
6562}
6563
6564static VALUE
6565vm_opt_minus(VALUE recv, VALUE obj)
6566{
6567 if (FIXNUM_2_P(recv, obj) &&
6568 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6569 return rb_fix_minus_fix(recv, obj);
6570 }
6571 else if (FLONUM_2_P(recv, obj) &&
6572 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6573 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6574 }
6575 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6576 return Qundef;
6577 }
6578 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6579 RBASIC_CLASS(obj) == rb_cFloat &&
6580 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6581 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6582 }
6583 else {
6584 return Qundef;
6585 }
6586}
6587
6588static VALUE
6589vm_opt_mult(VALUE recv, VALUE obj)
6590{
6591 if (FIXNUM_2_P(recv, obj) &&
6592 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6593 return rb_fix_mul_fix(recv, obj);
6594 }
6595 else if (FLONUM_2_P(recv, obj) &&
6596 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6597 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6598 }
6599 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6600 return Qundef;
6601 }
6602 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6603 RBASIC_CLASS(obj) == rb_cFloat &&
6604 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6605 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6606 }
6607 else {
6608 return Qundef;
6609 }
6610}
6611
6612static VALUE
6613vm_opt_div(VALUE recv, VALUE obj)
6614{
6615 if (FIXNUM_2_P(recv, obj) &&
6616 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6617 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6618 }
6619 else if (FLONUM_2_P(recv, obj) &&
6620 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6621 return rb_flo_div_flo(recv, obj);
6622 }
6623 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6624 return Qundef;
6625 }
6626 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6627 RBASIC_CLASS(obj) == rb_cFloat &&
6628 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6629 return rb_flo_div_flo(recv, obj);
6630 }
6631 else {
6632 return Qundef;
6633 }
6634}
6635
6636static VALUE
6637vm_opt_mod(VALUE recv, VALUE obj)
6638{
6639 if (FIXNUM_2_P(recv, obj) &&
6640 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6641 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6642 }
6643 else if (FLONUM_2_P(recv, obj) &&
6644 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6645 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6646 }
6647 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6648 return Qundef;
6649 }
6650 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6651 RBASIC_CLASS(obj) == rb_cFloat &&
6652 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6653 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6654 }
6655 else {
6656 return Qundef;
6657 }
6658}
6659
6660static VALUE
6661vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6662{
6663 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6664 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6665
6666 if (!UNDEF_P(val)) {
6667 return RBOOL(!RTEST(val));
6668 }
6669 }
6670
6671 return Qundef;
6672}
6673
6674static VALUE
6675vm_opt_lt(VALUE recv, VALUE obj)
6676{
6677 if (FIXNUM_2_P(recv, obj) &&
6678 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6679 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6680 }
6681 else if (FLONUM_2_P(recv, obj) &&
6682 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6683 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6684 }
6685 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6686 return Qundef;
6687 }
6688 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6689 RBASIC_CLASS(obj) == rb_cFloat &&
6690 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6691 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6692 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6693 }
6694 else {
6695 return Qundef;
6696 }
6697}
6698
6699static VALUE
6700vm_opt_le(VALUE recv, VALUE obj)
6701{
6702 if (FIXNUM_2_P(recv, obj) &&
6703 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6704 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6705 }
6706 else if (FLONUM_2_P(recv, obj) &&
6707 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6708 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6709 }
6710 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6711 return Qundef;
6712 }
6713 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6714 RBASIC_CLASS(obj) == rb_cFloat &&
6715 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6716 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6717 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6718 }
6719 else {
6720 return Qundef;
6721 }
6722}
6723
6724static VALUE
6725vm_opt_gt(VALUE recv, VALUE obj)
6726{
6727 if (FIXNUM_2_P(recv, obj) &&
6728 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6729 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6730 }
6731 else if (FLONUM_2_P(recv, obj) &&
6732 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6733 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6734 }
6735 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6736 return Qundef;
6737 }
6738 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6739 RBASIC_CLASS(obj) == rb_cFloat &&
6740 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6741 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6742 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6743 }
6744 else {
6745 return Qundef;
6746 }
6747}
6748
6749static VALUE
6750vm_opt_ge(VALUE recv, VALUE obj)
6751{
6752 if (FIXNUM_2_P(recv, obj) &&
6753 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6754 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6755 }
6756 else if (FLONUM_2_P(recv, obj) &&
6757 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6758 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6759 }
6760 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6761 return Qundef;
6762 }
6763 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6764 RBASIC_CLASS(obj) == rb_cFloat &&
6765 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6766 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6767 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6768 }
6769 else {
6770 return Qundef;
6771 }
6772}
6773
6774
6775static VALUE
6776vm_opt_ltlt(VALUE recv, VALUE obj)
6777{
6778 if (SPECIAL_CONST_P(recv)) {
6779 return Qundef;
6780 }
6781 else if (RBASIC_CLASS(recv) == rb_cString &&
6782 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6783 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6784 return rb_str_buf_append(recv, obj);
6785 }
6786 else {
6787 return rb_str_concat(recv, obj);
6788 }
6789 }
6790 else if (RBASIC_CLASS(recv) == rb_cArray &&
6791 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6792 return rb_ary_push(recv, obj);
6793 }
6794 else {
6795 return Qundef;
6796 }
6797}
6798
6799static VALUE
6800vm_opt_and(VALUE recv, VALUE obj)
6801{
6802 // If recv and obj are both fixnums, then the bottom tag bit
6803 // will be 1 on both. 1 & 1 == 1, so the result value will also
6804 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6805 // will be 0, and we return Qundef.
6806 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6807
6808 if (FIXNUM_P(ret) &&
6809 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6810 return ret;
6811 }
6812 else {
6813 return Qundef;
6814 }
6815}
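
/* Worked example (illustrative): a Fixnum n is encoded as (n << 1) | 1,
 * so for 3 & 5:
 *
 *   recv = INT2FIX(3) = 0b0111  (7)
 *   obj  = INT2FIX(5) = 0b1011  (11)
 *   ret  = 7 & 11     = 0b0011  (3) = INT2FIX(1) = INT2FIX(3 & 5)
 *
 * The tag bits AND to 1 and the payload bits AND in place, so no
 * re-tagging is needed.
 */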
6816
6817static VALUE
6818vm_opt_or(VALUE recv, VALUE obj)
6819{
6820 if (FIXNUM_2_P(recv, obj) &&
6821 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6822 return recv | obj;
6823 }
6824 else {
6825 return Qundef;
6826 }
6827}
6828
6829static VALUE
6830vm_opt_aref(VALUE recv, VALUE obj)
6831{
6832 if (SPECIAL_CONST_P(recv)) {
6833 if (FIXNUM_2_P(recv, obj) &&
6834 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6835 return rb_fix_aref(recv, obj);
6836 }
6837 return Qundef;
6838 }
6839 else if (RBASIC_CLASS(recv) == rb_cArray &&
6840 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6841 if (FIXNUM_P(obj)) {
6842 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6843 }
6844 else {
6845 return rb_ary_aref1(recv, obj);
6846 }
6847 }
6848 else if (RBASIC_CLASS(recv) == rb_cHash &&
6849 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6850 return rb_hash_aref(recv, obj);
6851 }
6852 else {
6853 return Qundef;
6854 }
6855}
6856
6857static VALUE
6858vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6859{
6860 if (SPECIAL_CONST_P(recv)) {
6861 return Qundef;
6862 }
6863 else if (RBASIC_CLASS(recv) == rb_cArray &&
6864 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6865 FIXNUM_P(obj)) {
6866 rb_ary_store(recv, FIX2LONG(obj), set);
6867 return set;
6868 }
6869 else if (RBASIC_CLASS(recv) == rb_cHash &&
6870 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6871 rb_hash_aset(recv, obj, set);
6872 return set;
6873 }
6874 else {
6875 return Qundef;
6876 }
6877}
6878
6879static VALUE
6880vm_opt_aref_with(VALUE recv, VALUE key)
6881{
6882 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6883 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6884 rb_hash_compare_by_id_p(recv) == Qfalse &&
6885 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6886 return rb_hash_aref(recv, key);
6887 }
6888 else {
6889 return Qundef;
6890 }
6891}
6892
6893VALUE
6894rb_vm_opt_aref_with(VALUE recv, VALUE key)
6895{
6896 return vm_opt_aref_with(recv, key);
6897}
6898
6899static VALUE
6900vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6901{
6902 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6903 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6904 rb_hash_compare_by_id_p(recv) == Qfalse) {
6905 return rb_hash_aset(recv, key, val);
6906 }
6907 else {
6908 return Qundef;
6909 }
6910}
6911
6912VALUE
6913rb_vm_opt_aset_with(VALUE recv, VALUE key, VALUE value)
6914{
6915 return vm_opt_aset_with(recv, key, value);
6916}
6917
6918static VALUE
6919vm_opt_length(VALUE recv, int bop)
6920{
6921 if (SPECIAL_CONST_P(recv)) {
6922 return Qundef;
6923 }
6924 else if (RBASIC_CLASS(recv) == rb_cString &&
6925 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6926 if (bop == BOP_EMPTY_P) {
6927 return LONG2NUM(RSTRING_LEN(recv));
6928 }
6929 else {
6930 return rb_str_length(recv);
6931 }
6932 }
6933 else if (RBASIC_CLASS(recv) == rb_cArray &&
6934 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6935 return LONG2NUM(RARRAY_LEN(recv));
6936 }
6937 else if (RBASIC_CLASS(recv) == rb_cHash &&
6938 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6939 return INT2FIX(RHASH_SIZE(recv));
6940 }
6941 else {
6942 return Qundef;
6943 }
6944}
6945
6946static VALUE
6947vm_opt_empty_p(VALUE recv)
6948{
6949 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6950 case Qundef: return Qundef;
6951 case INT2FIX(0): return Qtrue;
6952 default: return Qfalse;
6953 }
6954}
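
/* Note (editorial): vm_opt_length(recv, BOP_EMPTY_P) returns the size as
 * a tagged VALUE, so empty? reduces to one word comparison: INT2FIX(0)
 * means empty, any other number means non-empty, and Qundef propagates
 * the not-optimizable case to a real method call.
 */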
6955
6956VALUE rb_false(VALUE obj);
6957
6958static VALUE
6959vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6960{
6961 if (NIL_P(recv) &&
6962 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6963 return Qtrue;
6964 }
6965 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6966 return Qfalse;
6967 }
6968 else {
6969 return Qundef;
6970 }
6971}
6972
6973static VALUE
6974fix_succ(VALUE x)
6975{
6976 switch (x) {
6977 case ~0UL:
6978 /* 0xFFFF_FFFF == INT2FIX(-1)
6979 * `-1.succ` is of course 0. */
6980 return INT2FIX(0);
6981 case RSHIFT(~0UL, 1):
6982 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6983 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6984 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6985 default:
6986 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6987 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6988 * == lx*2 + ly*2 + 1
6989 * == (lx*2+1) + (ly*2+1) - 1
6990 * == x + y - 1
6991 *
6992 * Here, if we put y := INT2FIX(1):
6993 *
6994 * == x + INT2FIX(1) - 1
6995 * == x + 2 .
6996 */
6997 return x + 2;
6998 }
6999}
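
/* Worked example (illustrative): INT2FIX(41) == 83 and 83 + 2 == 85 ==
 * INT2FIX(42), so the common case of Integer#succ is a single untagged
 * addition with no boxing.
 */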
7000
7001static VALUE
7002vm_opt_succ(VALUE recv)
7003{
7004 if (FIXNUM_P(recv) &&
7005 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7006 return fix_succ(recv);
7007 }
7008 else if (SPECIAL_CONST_P(recv)) {
7009 return Qundef;
7010 }
7011 else if (RBASIC_CLASS(recv) == rb_cString &&
7012 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7013 return rb_str_succ(recv);
7014 }
7015 else {
7016 return Qundef;
7017 }
7018}
7019
7020static VALUE
7021vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7022{
7023 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7024 return RBOOL(!RTEST(recv));
7025 }
7026 else {
7027 return Qundef;
7028 }
7029}
7030
7031static VALUE
7032vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7033{
7034 if (SPECIAL_CONST_P(recv)) {
7035 return Qundef;
7036 }
7037 else if (RBASIC_CLASS(recv) == rb_cString &&
7038 CLASS_OF(obj) == rb_cRegexp &&
7039 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7040 return rb_reg_match(obj, recv);
7041 }
7042 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7043 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7044 return rb_reg_match(recv, obj);
7045 }
7046 else {
7047 return Qundef;
7048 }
7049}
7050
7051rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7052
7053NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7054
7055static inline void
7056vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7057 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7058 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7059{
7060 rb_event_flag_t event = pc_events & target_event;
7061 VALUE self = GET_SELF();
7062
7063 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7064
7065 if (event & global_hooks->events) {
7066 /* increment PC because source line is calculated with PC-1 */
7067 reg_cfp->pc++;
7068 vm_dtrace(event, ec);
7069 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7070 reg_cfp->pc--;
7071 }
7072
7073 // Load here since global hook above can add and free local hooks
7074 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7075 if (local_hooks != NULL) {
7076 if (event & local_hooks->events) {
7077 /* increment PC because source line is calculated with PC-1 */
7078 reg_cfp->pc++;
7079 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7080 reg_cfp->pc--;
7081 }
7082 }
7083}
7084
7085#define VM_TRACE_HOOK(target_event, val) do { \
7086 if ((pc_events & (target_event)) & enabled_flags) { \
7087 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7088 } \
7089} while (0)
7090
7091static VALUE
7092rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7093{
7094 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7095 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7096 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7097}
7098
7099static void
7100vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7101{
7102 const VALUE *pc = reg_cfp->pc;
7103 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7104 rb_event_flag_t global_events = enabled_flags;
7105
7106 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7107 return;
7108 }
7109 else {
7110 const rb_iseq_t *iseq = reg_cfp->iseq;
7111 VALUE iseq_val = (VALUE)iseq;
7112 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7113 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7114 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7115 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7116 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7117 rb_hook_list_t *bmethod_local_hooks = NULL;
7118 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7119 rb_event_flag_t bmethod_local_events = 0;
7120 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7121 enabled_flags |= iseq_local_events;
7122
7123 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7124
7125 if (bmethod_frame) {
7126 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7127 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7128 bmethod_local_hooks = me->def->body.bmethod.hooks;
7129 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7130 if (bmethod_local_hooks) {
7131 bmethod_local_events = bmethod_local_hooks->events;
7132 }
7133 }
7134
7135
7136 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7137#if 0
7138 /* disable trace */
7139 /* TODO: incomplete */
7140 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7141#else
7142 /* do not disable trace because of performance problem
7143 * (re-enable overhead)
7144 */
7145#endif
7146 return;
7147 }
7148 else if (ec->trace_arg != NULL) {
7149 /* already tracing */
7150 return;
7151 }
7152 else {
7153 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7154 /* Note, not considering iseq local events here since the same
7155 * iseq could be used in multiple bmethods. */
7156 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7157
7158 if (0) {
7159 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7160 (int)pos,
7161 (int)pc_events,
7162 RSTRING_PTR(rb_iseq_path(iseq)),
7163 (int)rb_iseq_line_no(iseq, pos),
7164 RSTRING_PTR(rb_iseq_label(iseq)));
7165 }
7166 VM_ASSERT(reg_cfp->pc == pc);
7167 VM_ASSERT(pc_events != 0);
7168
7169 /* check traces */
7170 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7171 /* b_call instruction running as a method. Fire call event. */
7172 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7173 }
7174 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7175 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7176 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7177 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7178 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7179 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7180 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7181 /* b_return instruction running as a method. Fire return event. */
7182 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7183 }
7184
7185 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7186 // We need the pointer to stay valid in case compaction happens in a trace hook.
7187 //
7188 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7189 // storage for `rb_method_definition_t` is not on the GC heap.
7190 RB_GC_GUARD(iseq_val);
7191 }
7192 }
7193}
7194#undef VM_TRACE_HOOK
7195
7196#if VM_CHECK_MODE > 0
7197NORETURN( NOINLINE( COLDFUNC
7198void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7199
7200void
7201Init_vm_stack_canary(void)
7202{
7203 /* This has to be called _after_ our PRNG is properly set up. */
7204 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7205 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7206
7207 vm_stack_canary_was_born = true;
7208 VM_ASSERT(n == 0);
7209}
7210
7211void
7212rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7213{
7214 /* Because a method has already been called, why not call
7215 * another one. */
7216 const char *insn = rb_insns_name(i);
7217 VALUE inspection = rb_inspect(c);
7218 const char *str = StringValueCStr(inspection);
7219
7220 rb_bug("dead canary found at %s: %s", insn, str);
7221}
7222
7223#else
7224void Init_vm_stack_canary(void) { /* nothing to do */ }
7225#endif
7226
7227
7228/* a part of the following code is generated by this ruby script:
7229
723016.times{|i|
7231 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7232 typedef_args.prepend(", ") if i != 0
7233 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7234 call_args.prepend(", ") if i != 0
7235 puts %Q{
7236static VALUE
7237builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7238{
7239 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7240 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7241}}
7242}
7243
7244puts
7245puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
724616.times{|i|
7247 puts " builtin_invoker#{i},"
7248}
7249puts "};"
7250*/
7251
7252static VALUE
7253builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7254{
7255 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7256 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7257}
7258
7259static VALUE
7260builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7261{
7262 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7263 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7264}
7265
7266static VALUE
7267builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7268{
7269 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7270 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7271}
7272
7273static VALUE
7274builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7275{
7276 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
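
/* builtin_invoker0 through builtin_invoker15 above are fixed-arity
 * trampolines: each casts the opaque rb_insn_func_t back to a function
 * pointer of the matching arity and spreads argv into positional
 * parameters.  Builtin functions are declared with explicit VALUE
 * parameters rather than an (int argc, VALUE *argv) pair, and calling
 * through a mismatched function-pointer type is undefined behavior in C,
 * hence one trampoline per supported arity.
 *
 * A minimal sketch (`example_builtin` is hypothetical, not part of this
 * file): a builtin registered with argc == 2 must match the shape that
 * builtin_invoker2's cast assumes:
 *
 *     static VALUE
 *     example_builtin(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b)
 *     {
 *         return rb_funcall(a, rb_intern("+"), 1, b);  // hypothetical body
 *     }
 */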

typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}
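
/* lookup_builtin_invoker indexes the table directly, so argc must be in
 * 0..15; a builtin's arity is fixed when it is registered, and the table
 * holds exactly one entry per supported arity.  A sketch of the dispatch
 * this enables (mirroring what invoke_bf below does):
 *
 *     builtin_invoker inv = lookup_builtin_invoker(bf->argc);
 *     VALUE ret = inv(ec, self, argv, (rb_insn_func_t)(uintptr_t)bf->func_ptr);
 */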

static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}
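
/* The canary guards the `Primitive.attr! :leaf` contract: a leaf builtin
 * must not re-enter the VM (no Ruby-level calls, no frame pushes), so the
 * VM stack above the current frame must stay untouched across the call.
 * In checking builds, SETUP_CANARY plants a sentinel on the stack and
 * CHECK_CANARY verifies it afterwards; roughly (a sketch, not the exact
 * macro bodies):
 *
 *     VALUE *canary = GET_SP();              // slot just above live values
 *     *canary = vm_stack_canary;             // SETUP_CANARY
 *     // ... invoke the builtin ...
 *     if (*canary != vm_stack_canary)        // CHECK_CANARY
 *         rb_bug("leaf builtin clobbered the VM stack");
 */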

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}
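
/* The argv computation above recovers the method's local-variable slots
 * straight from the environment pointer: locals sit below ep, under
 * VM_ENV_DATA_SIZE words of environment bookkeeping, so local 0 lives at
 * ep - local_table_size - VM_ENV_DATA_SIZE + 1.  A worked example,
 * assuming VM_ENV_DATA_SIZE == 3 and an iseq with local_table_size == 4:
 *
 *     local 0 -> ep[-6], local 1 -> ep[-5], local 2 -> ep[-4], local 3 -> ep[-3]
 *
 * With start_index == 1 and bf->argc == 2, argv points at local 1, and the
 * builtin receives locals 1 and 2 without any copying.
 */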

// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}

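/* rb_vm_lvar_exposed backs `__builtin_inline!`/`Primitive.cexpr!` snippets:
 * the builtin loader compiles the embedded C with each access to a Ruby
 * local variable rewritten into a call of this function, indexed relative
 * to the caller's ep (locals live at negative offsets, as sketched above).
 * An illustrative sketch of the generated shape (the offset here is
 * hypothetical; the generator computes it from the iseq's local table):
 *
 *     VALUE str = rb_vm_lvar_exposed(ec, -3);  // offset depends on the iseq
 */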