Ruby 3.5.0dev (2025-08-27 revision 61d26c35bf8c744b4c59a44536bc58a6c4653ab6)
vm_insnhelper.c (61d26c35bf8c744b4c59a44536bc58a6c4653ab6)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
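
/* A sketch of how the criticality levels above map to behavior (assuming the
 * three-level ruby_stack_overflow_critical_level enum declared elsewhere,
 * ordered prevention < signal < fatal):
 *
 *   rb_ec_stack_overflow(ec, rb_stack_overflow_fatal);
 *       // raise the prebuilt "fatal" exception; no backtrace at all.
 *   rb_ec_stack_overflow(ec, rb_stack_overflow_signal);
 *       // raised from a signal handler: setup == FALSE, so
 *       // ec_stack_overflow() skips materializing the backtrace.
 *   rb_ec_stack_overflow(ec, rb_stack_overflow_prevention);
 *       // ordinary overflow: setup == TRUE, copy the exception and
 *       // attach a backtrace via rb_ec_backtrace_object().
 */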

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
        );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
      /*                           BLK    ME     CREF   CFRAME */
      CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}
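
/* Rough sketch of the walk above: iseq_encoded is a flat array in which each
 * instruction occupies 1 + operand-count slots. With the hypothetical layout
 *
 *   pos 0: putself                            (len 1)
 *   pos 1: opt_send_without_block <calldata>  (len 2)
 *   pos 3: leave                              (len 1)
 *
 * a PC pointing at pos 3 makes previous_insn_index() return 1, the start of
 * the opt_send_without_block that is currently executing. */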

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting this large contents.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i = 0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
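
/* A rough picture of the VM value stack right after vm_push_frame() returns,
 * relative to the final sp (illustration only; the authoritative layout is
 * given by the VM_ENV_DATA_INDEX_* constants asserted above):
 *
 *   sp[-(3 + local_size)] .. sp[-4] : local_size slots, initialized to Qnil
 *   sp[-3] = cref_or_me              <- ep[-2] (VM_ENV_DATA_INDEX_ME_CREF)
 *   sp[-2] = specval                 <- ep[-1] (VM_ENV_DATA_INDEX_SPECVAL)
 *   sp[-1] = type                    <- ep[ 0] (VM_ENV_DATA_INDEX_FLAGS)
 *   sp     = cfp->sp                 (first free slot; cfp->ep == sp - 1)
 */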

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}
// Push a pseudo frame whose file name is `fname`.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
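
/* For illustration, the messages the branches above produce:
 *
 *   rb_arity_error_new(1, 2, 2)                   => "wrong number of arguments (given 1, expected 2)"
 *   rb_arity_error_new(1, 2, UNLIMITED_ARGUMENTS) => "wrong number of arguments (given 1, expected 2+)"
 *   rb_arity_error_new(1, 2, 3)                   => "wrong number of arguments (given 1, expected 2..3)"
 */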

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember the env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
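
/* Sketch of why the fast/slow split above is safe: while an env still lives
 * on the VM stack it is not a GC-managed object, so a plain store needs no
 * write barrier and VM_ENV_FLAG_WB_REQUIRED is clear. Once the env escapes
 * to the heap (e.g. it is captured by a Proc), the flag is set and writes
 * must take vm_env_write_slowpath(), which asks the GC to re-scan the env
 * object via rb_gc_writebarrier_remember() before the raw store. */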

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}

/* svar */

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
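
/* How the (key, type) encoding above maps to Ruby's special variables
 * (a sketch of the code above, not a normative table):
 *
 *   type == 0, key == VM_SVAR_LASTLINE  -> $_
 *   type == 0, key == VM_SVAR_BACKREF   -> $~
 *   type odd,  type >> 1 == '&'         -> $&  (whole match)
 *   type odd,  type >> 1 == '`'         -> $`  (pre-match)
 *   type odd,  type >> 1 == '\''        -> $'  (post-match)
 *   type odd,  type >> 1 == '+'         -> $+  (last matched group)
 *   type even, type >> 1 == n           -> $1..$9 via rb_reg_nth_match(n)
 */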

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}
PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_replace_with_duplicated_cref: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    #define ADD_NEW_CREF(new_cref) \
        if (new_cref_tail) { \
            RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
        } \
        else { \
            new_cref_head = new_cref; \
        } \
        new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
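
/* The lexical scan above is what makes this Ruby sketch resolve the way it
 * does (an illustration, not a normative spec):
 *
 *   module M
 *     X = 1
 *     class C
 *       def self.x; X; end   # found via the enclosing cref chain (C, then M)
 *     end                    # before any ancestor of C is consulted
 *   end
 *
 * Only when no cref in the lexical chain defines the constant does the code
 * fall back to rb_const_get(), i.e. an ancestry search starting from the
 * innermost non-eval cref's class. */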

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
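
/* For illustration: a constant path compiles to a zero-terminated ID array,
 * so (assuming interned ids idFoo and idBar for :Foo and :Bar)
 *
 *   Foo::Bar    -> segments = { idFoo, idBar, 0 }         (first hop lexical)
 *   ::Foo::Bar  -> segments = { idNULL, idFoo, idBar, 0 } (rooted at Object)
 *
 * and the loop above folds vm_get_ev_const() over the hops left to right. */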


static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        fields_obj = rb_obj_fields(obj, id);
    }

    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
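
/* A sketch of the inline-cache protocol used above: each ivar read site
 * caches a (shape_id, attr_index) pair. Reading @x then works roughly like
 *
 *   if (RBASIC_SHAPE_ID(obj) == cached_shape_id)  // same object layout as
 *       return fields[cached_index];              // last time: compare + load
 *   else
 *       ... slow path: walk the shape tree, refill (shape_id, index) ...
 *
 * (field names here are illustrative, not the actual IVC layout). Shapes
 * make the guard cheap: objects whose ivars were defined in the same order
 * share a shape_id, so a monomorphic site stays a single compare and load. */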

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

        if (!rb_shape_too_complex_p(next_shape_id)) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
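
/* The two cache states vm_setivar() accepts, sketched:
 *
 *   1. shape_id == dest_shape_id: the ivar already exists at `index`, so
 *      this is a plain overwrite with no shape change.
 *   2. dest_shape_id is a direct child of shape_id whose edge name is `id`
 *      and whose capacity matches: the cached write adds exactly this ivar,
 *      so the object can take the memoized transition (set the new shape_id,
 *      then store at `index`) without consulting the shape tree.
 *
 * Anything else (unrelated shape, capacity growth, too-complex object)
 * breaks out and returns Qundef so the caller falls back to a slow path. */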

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i = 0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
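
/* Sketch of which Ruby destructuring hits which branch above (flag bit 0x01
 * = splat present, 0x02 = post arguments; push order traced from the code):
 *
 *   a, b  = [1, 2, 3]   # num = 2, flag = 0:    pushes 2, then 1 (1 on top)
 *   a, *b = [1, 2, 3]   # num = 1, flag = 0x01: pushes [2, 3], then 1
 *   *a, b = [1, 2, 3]   # num = 1, flag = 0x03: pushes 3, then [1, 2]
 *   a, b  = [1]         # num = 2, flag = 0:    pushes nil, then 1 (padding)
 *
 * matching the "# top" comments in the branches. */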
1946
1947static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1948
1949static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1950
1951static struct rb_class_cc_entries *
1952vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1953{
1954 int initial_capa = 2;
1955 struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
1956#if VM_CHECK_MODE > 0
1957 ccs->debug_sig = ~(VALUE)ccs;
1958#endif
1959 ccs->capa = initial_capa;
1960 ccs->len = 0;
1961 ccs->cme = cme;
1962 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
1963
1964 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1965 RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
1966 return ccs;
1967}
1968
1969static void
1970vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
1971{
1972 if (! vm_cc_markable(cc)) {
1973 return;
1974 }
1975
1976 if (UNLIKELY(ccs->len == ccs->capa)) {
1977 RUBY_ASSERT(ccs->capa > 0);
1978 ccs->capa *= 2;
1979 ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
1980#if VM_CHECK_MODE > 0
1981 ccs->debug_sig = ~(VALUE)ccs;
1982#endif
1983 // GC?
1984 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1985 }
1986 VM_ASSERT(ccs->len < ccs->capa);
1987
1988 const int pos = ccs->len++;
1989 ccs->entries[pos].argc = vm_ci_argc(ci);
1990 ccs->entries[pos].flag = vm_ci_flag(ci);
1991 RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);
1992
1993 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
1994 // for tuning
1995 // vm_mtbl_dump(klass, 0);
1996 }
1997}
1998
1999#if VM_CHECK_MODE > 0
2000void
2001rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2002{
2003 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2004 for (int i=0; i<ccs->len; i++) {
2005 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2006 ccs->entries[i].flag,
2007 ccs->entries[i].argc);
2008 rp(ccs->entries[i].cc);
2009 }
2010}
2011
2012static int
2013vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2014{
2015 VM_ASSERT(vm_ccs_p(ccs));
2016 VM_ASSERT(ccs->len <= ccs->capa);
2017
2018 for (int i=0; i<ccs->len; i++) {
2019 const struct rb_callcache *cc = ccs->entries[i].cc;
2020
2021 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2022 VM_ASSERT(vm_cc_class_check(cc, klass));
2023 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2024 VM_ASSERT(!vm_cc_super_p(cc));
2025 VM_ASSERT(!vm_cc_refinement_p(cc));
2026 }
2027 return TRUE;
2028}
2029#endif
2030
2031const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2032
2033static void
2034vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2035{
2036 ASSERT_vm_locking();
2037
2038 if (rb_multi_ractor_p()) {
2039 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2040 // Another ractor updated the CC table while we were waiting on the VM lock.
2041 // We have to retry.
2042 return;
2043 }
2044
2045 struct rb_class_cc_entries *ccs = NULL;
2046 rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs);
2047
2048 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2049 // Another ractor replaced that entry while we were waiting on the VM lock.
2050 return;
2051 }
2052
2053 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2054 rb_vm_cc_table_delete(new_table, mid);
2055 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2056 }
2057 else {
2058 rb_vm_cc_table_delete(cc_tbl, mid);
2059 }
2060}
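/* A minimal sketch of the copy-on-write pattern used above for the
 * multi-ractor case (illustrative only, not compiled): readers traverse
 * cc_tbl without locking, so a writer must never mutate the published
 * table in place.
 */
#if 0
static void
cow_evict_sketch(VALUE klass, ID mid) /* hypothetical name */
{
    VALUE table = RCLASS_WRITABLE_CC_TBL(klass);
    VALUE copy = rb_vm_cc_table_dup(table);   // take a private copy
    rb_vm_cc_table_delete(copy, mid);         // mutate only the copy
    // Publish atomically: concurrent readers observe either the old or
    // the new table, never a half-edited one.
    RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), copy);
}
#endif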
2061
2062static const struct rb_callcache *
2063vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2064{
2065 ASSERT_vm_locking();
2066
2067 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2068 const VALUE original_cc_table = cc_tbl;
2069 struct rb_class_cc_entries *ccs = NULL;
2070
2071 if (!cc_tbl) {
2072 cc_tbl = rb_vm_cc_table_create(1);
2073 }
2074 else if (rb_multi_ractor_p()) {
2075 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2076 }
2077
2078 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2079
2080 const rb_callable_method_entry_t *cme;
2081
2082 if (ccs) {
2083 cme = ccs->cme;
2084 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2085
2086 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2087 }
2088 else {
2089 cme = rb_callable_method_entry(klass, mid);
2090 }
2091
2092 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2093
2094 if (cme == NULL) {
2095 // undef or not found: can't cache the information
2096 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2097 return &vm_empty_cc;
2098 }
2099
2100 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2101
2102 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2103
2104 if (ccs == NULL) {
2105 VM_ASSERT(cc_tbl);
2106
2107 if (!LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs))) {
2108 // TODO: required?
2109 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2110 }
2111 }
2112
2113 cme = rb_check_overloaded_cme(cme, ci);
2114
2115 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2116 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2117
2118 VM_ASSERT(vm_cc_cme(cc) != NULL);
2119 VM_ASSERT(cme->called_id == mid);
2120 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2121
2122 if (original_cc_table != cc_tbl) {
2123 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2124 }
2125
2126 return cc;
2127}
2128
2129static const struct rb_callcache *
2130vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2131{
2132 VALUE cc_tbl;
2133 struct rb_class_cc_entries *ccs;
2134retry:
2135 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2136 ccs = NULL;
2137
2138 if (cc_tbl) {
2139 // CCS data is keyed on method id, so we don't need the method id
2140 // for doing comparisons in the `for` loop below.
2141
2142 if (rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
2143 const int ccs_len = ccs->len;
2144
2145 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2146 RB_VM_LOCKING() {
2147 vm_evict_cc(klass, cc_tbl, mid);
2148 }
2149 goto retry;
2150 }
2151 else {
2152 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2153
2154 // We already know the method id is correct because we had
2155 // to look up the ccs_data by method id. All we need to
2156 // compare is argc and flag
2157 unsigned int argc = vm_ci_argc(ci);
2158 unsigned int flag = vm_ci_flag(ci);
2159
2160 for (int i=0; i<ccs_len; i++) {
2161 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2162 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2163 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2164
2165 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2166
2167 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2168 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2169
2170 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2171 VM_ASSERT(ccs_cc->klass == klass);
2172 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2173
2174 return ccs_cc;
2175 }
2176 }
2177 }
2178 }
2179 }
2180
2181 RB_GC_GUARD(cc_tbl);
2182 return NULL;
2183}
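/* Note on the (argc, flag) comparison above: one method id may have several
 * cache entries, one per call shape. For example (Ruby level, illustrative):
 *
 *   obj.m(1, 2)   # argc:2, plain flags
 *   obj.m(*ary)   # argc:1, VM_CALL_ARGS_SPLAT
 *   obj.m(k: 1)   # argc:1, VM_CALL_KWARG
 *
 * all hang off the same ccs for :m but match different entries[i].
 */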
2184
2185static const struct rb_callcache *
2186vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2187{
2188 const ID mid = vm_ci_mid(ci);
2189
2190 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2191 if (cc) {
2192 return cc;
2193 }
2194
2195 RB_VM_LOCKING() {
2196 if (rb_multi_ractor_p()) {
2197 // The CC may have been populated by another ractor while we were waiting on the lock,
2198 // so we must look up a second time.
2199 cc = vm_lookup_cc(klass, ci, mid);
2200 }
2201
2202 if (!cc) {
2203 cc = vm_populate_cc(klass, ci, mid);
2204 }
2205 }
2206
2207 return cc;
2208}
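/* vm_search_cc() above is a double-checked lookup: one lock-free attempt,
 * then a re-check under the VM lock before populating, so two ractors
 * racing on the same (klass, mid) cannot insert duplicate entries:
 *
 *   cc = lookup();           // fast path, no lock
 *   if (!cc) {
 *       LOCK();
 *       cc = lookup();       // re-check: another ractor may have won
 *       if (!cc) cc = populate();
 *       UNLOCK();
 *   }
 */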
2209
2210const struct rb_callcache *
2211rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2212{
2213 const struct rb_callcache *cc;
2214
2215 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2216
2217 cc = vm_search_cc(klass, ci);
2218
2219 VM_ASSERT(cc);
2220 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2221 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2222 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2223 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2224 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2225
2226 return cc;
2227}
2228
2229static const struct rb_callcache *
2230vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2231{
2232#if USE_DEBUG_COUNTER
2233 const struct rb_callcache *old_cc = cd->cc;
2234#endif
2235
2236 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2237
2238#if OPT_INLINE_METHOD_CACHE
2239 cd->cc = cc;
2240
2241 const struct rb_callcache *empty_cc = &vm_empty_cc;
2242 if (cd_owner && cc != empty_cc) {
2243 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2244 }
2245
2246#if USE_DEBUG_COUNTER
2247 if (!old_cc || old_cc == empty_cc) {
2248 // empty
2249 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2250 }
2251 else if (old_cc == cc) {
2252 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2253 }
2254 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2255 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2256 }
2257 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2258 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2259 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2260 }
2261 else {
2262 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2263 }
2264#endif
2265#endif // OPT_INLINE_METHOD_CACHE
2266
2267 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2268 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2269
2270 return cc;
2271}
2272
2273ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2274static const struct rb_callcache *
2275vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2276{
2277 const struct rb_callcache *cc = cd->cc;
2278
2279#if OPT_INLINE_METHOD_CACHE
2280 if (LIKELY(vm_cc_class_check(cc, klass))) {
2281 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2282 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2283 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2284 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2285 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2286 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2287
2288 return cc;
2289 }
2290 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2291 }
2292 else {
2293 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2294 }
2295#endif
2296
2297 return vm_search_method_slowpath0(cd_owner, cd, klass);
2298}
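/* The fast path above performs exactly two guards per call site:
 *   1. vm_cc_class_check(cc, klass)             -- same receiver class as when cached?
 *   2. !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)) -- method not redefined since?
 * Only when a guard fails does the call fall back to the global lookup.
 */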
2299
2300static const struct rb_callable_method_entry_struct *
2301vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2302{
2303 VALUE klass = CLASS_OF(recv);
2304 VM_ASSERT(klass != Qfalse);
2305 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2306
2307 const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
2308 return vm_cc_cme(cc);
2309}
2310
2311#if __has_attribute(transparent_union)
2312typedef union {
2313 VALUE (*anyargs)(ANYARGS);
2314 VALUE (*f00)(VALUE);
2315 VALUE (*f01)(VALUE, VALUE);
2316 VALUE (*f02)(VALUE, VALUE, VALUE);
2317 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2318 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2319 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2320 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2321 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2322 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2323 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2324 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2325 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2326 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2327 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2328 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2329 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2330 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2331} __attribute__((__transparent_union__)) cfunc_type;
2332# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2333#else
2334typedef VALUE (*cfunc_type)(ANYARGS);
2335# define make_cfunc_type(f) (cfunc_type)(f)
2336#endif
2337
2338static inline int
2339check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2340{
2341 if (! me) {
2342 return false;
2343 }
2344 else {
2345 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2346 VM_ASSERT(callable_method_entry_p(me));
2347 VM_ASSERT(me->def);
2348 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2349 return false;
2350 }
2351 else {
2352#if __has_attribute(transparent_union)
2353 return me->def->body.cfunc.func == func.anyargs;
2354#else
2355 return me->def->body.cfunc.func == func;
2356#endif
2357 }
2358 }
2359}
2360
2361static inline int
2362check_method_basic_definition(const rb_callable_method_entry_t *me)
2363{
2364 return me && METHOD_ENTRY_BASIC(me);
2365}
2366
2367static inline int
2368vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2369{
2370 VM_ASSERT(iseq != NULL);
2371 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
2372 return check_cfunc(cme, func);
2373}
2374
2375#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2376#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
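/* Example use of the helpers above (this pattern appears in opt_equality()
 * below): ask whether the receiver still has the default BasicObject#==,
 * i.e. rb_obj_equal. The transparent union lets the caller pass a typed
 * function pointer without a cast:
 *
 *   if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_equal)) {
 *       // `==` is not redefined; identity comparison suffices
 *   }
 */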
2377
2378#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2379
2380static inline bool
2381FIXNUM_2_P(VALUE a, VALUE b)
2382{
2383 /* FIXNUM_P(a) && FIXNUM_P(b)
2384 * == ((a & 1) && (b & 1))
2385 * == a & b & 1 */
2386 SIGNED_VALUE x = a;
2387 SIGNED_VALUE y = b;
2388 SIGNED_VALUE z = x & y & 1;
2389 return z == 1;
2390}
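/* Worked example for the bit trick above: a Fixnum n is encoded as the
 * VALUE (n << 1) | 1, so its low bit is always 1.
 *
 *   a = 3  ->  VALUE 0b0111
 *   b = 5  ->  VALUE 0b1011
 *   a & b & 1 == 1  ->  both operands are Fixnums
 *
 * Heap objects are aligned pointers with a low bit of 0, so mixing one in
 * clears the result and the test fails.
 */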
2391
2392static inline bool
2393FLONUM_2_P(VALUE a, VALUE b)
2394{
2395#if USE_FLONUM
2396 /* FLONUM_P(a) && FLONUM_P(b)
2397 * == ((a & 3) == 2) && ((b & 3) == 2)
2398 * == !(((a ^ 2) | (b ^ 2)) & 3)
2399 */
2400 SIGNED_VALUE x = a;
2401 SIGNED_VALUE y = b;
2402 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2403 return !z;
2404#else
2405 return false;
2406#endif
2407}
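/* Worked example for the flonum test above: a flonum VALUE has tag bits
 * 0b10, i.e. (v & 3) == 2, and XORing with 2 clears exactly those bits:
 *
 *   flonum:     ....10  ->  (v ^ 2) & 3 == 0
 *   non-flonum: any other tag leaves a bit set in ((a ^ 2) | (b ^ 2)) & 3,
 *               making z nonzero and the function return false.
 */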
2408
2409static VALUE
2410opt_equality_specialized(VALUE recv, VALUE obj)
2411{
2412 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2413 goto compare_by_identity;
2414 }
2415 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2416 goto compare_by_identity;
2417 }
2418 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2419 goto compare_by_identity;
2420 }
2421 else if (SPECIAL_CONST_P(recv)) {
2422 // other special constants: no specialized comparison; fall through to Qundef
2423 }
2424 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2425 double a = RFLOAT_VALUE(recv);
2426 double b = RFLOAT_VALUE(obj);
2427
2428#if MSC_VERSION_BEFORE(1300)
2429 if (isnan(a)) {
2430 return Qfalse;
2431 }
2432 else if (isnan(b)) {
2433 return Qfalse;
2434 }
2435 else
2436#endif
2437 return RBOOL(a == b);
2438 }
2439 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2440 if (recv == obj) {
2441 return Qtrue;
2442 }
2443 else if (RB_TYPE_P(obj, T_STRING)) {
2444 return rb_str_eql_internal(obj, recv);
2445 }
2446 }
2447 return Qundef;
2448
2449 compare_by_identity:
2450 return RBOOL(recv == obj);
2451}
2452
2453static VALUE
2454opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2455{
2456 VM_ASSERT(cd_owner != NULL);
2457
2458 VALUE val = opt_equality_specialized(recv, obj);
2459 if (!UNDEF_P(val)) return val;
2460
2461 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2462 return Qundef;
2463 }
2464 else {
2465 return RBOOL(recv == obj);
2466 }
2467}
2468
2469#undef EQ_UNREDEFINED_P
2470
2471static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2472NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2473
2474static VALUE
2475opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2476{
2477 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2478
2479 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2480 return RBOOL(recv == obj);
2481 }
2482 else {
2483 return Qundef;
2484 }
2485}
2486
2487static VALUE
2488opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2489{
2490 VALUE val = opt_equality_specialized(recv, obj);
2491 if (!UNDEF_P(val)) {
2492 return val;
2493 }
2494 else {
2495 return opt_equality_by_mid_slowpath(recv, obj, mid);
2496 }
2497}
2498
2499VALUE
2500rb_equal_opt(VALUE obj1, VALUE obj2)
2501{
2502 return opt_equality_by_mid(obj1, obj2, idEq);
2503}
2504
2505VALUE
2506rb_eql_opt(VALUE obj1, VALUE obj2)
2507{
2508 return opt_equality_by_mid(obj1, obj2, idEqlP);
2509}
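/* Typical usage of the optimized entry points above (a sketch; internal
 * callers such as Array#include? follow this shape): try the fast path and
 * fall back to a real method call only when it returns Qundef.
 *
 *   VALUE r = rb_equal_opt(a, b);
 *   if (UNDEF_P(r)) r = rb_funcall(a, idEq, 1, b);   // slow path
 */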
2510
2511extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2512extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2513
2514static VALUE
2515check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2516{
2517 switch (type) {
2518 case VM_CHECKMATCH_TYPE_WHEN:
2519 return pattern;
2520 case VM_CHECKMATCH_TYPE_RESCUE:
2521 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2522 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2523 }
2524 /* fall through */
2525 case VM_CHECKMATCH_TYPE_CASE: {
2526 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2527 }
2528 default:
2529 rb_bug("check_match: unreachable");
2530 }
2531}
2532
2533
2534#if MSC_VERSION_BEFORE(1300)
2535#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2536#else
2537#define CHECK_CMP_NAN(a, b) /* do nothing */
2538#endif
2539
2540static inline VALUE
2541double_cmp_lt(double a, double b)
2542{
2543 CHECK_CMP_NAN(a, b);
2544 return RBOOL(a < b);
2545}
2546
2547static inline VALUE
2548double_cmp_le(double a, double b)
2549{
2550 CHECK_CMP_NAN(a, b);
2551 return RBOOL(a <= b);
2552}
2553
2554static inline VALUE
2555double_cmp_gt(double a, double b)
2556{
2557 CHECK_CMP_NAN(a, b);
2558 return RBOOL(a > b);
2559}
2560
2561static inline VALUE
2562double_cmp_ge(double a, double b)
2563{
2564 CHECK_CMP_NAN(a, b);
2565 return RBOOL(a >= b);
2566}
2567
2568// Copied by vm_dump.c
2569static inline VALUE *
2570vm_base_ptr(const rb_control_frame_t *cfp)
2571{
2572 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2573
2574 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2575 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2576
2577 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2578 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2579 int params = ISEQ_BODY(cfp->iseq)->param.size;
2580
2581 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2582 bp += vm_ci_argc(ci);
2583 }
2584
2585 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2586 /* adjust `self' */
2587 bp += 1;
2588 }
2589#if VM_DEBUG_BP_CHECK
2590 if (bp != cfp->bp_check) {
2591 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2592 (long)(cfp->bp_check - GET_EC()->vm_stack),
2593 (long)(bp - GET_EC()->vm_stack));
2594 rb_bug("vm_base_ptr: unreachable");
2595 }
2596#endif
2597 return bp;
2598 }
2599 else {
2600 return NULL;
2601 }
2602}
2603
2604VALUE *
2605rb_vm_base_ptr(const rb_control_frame_t *cfp)
2606{
2607 return vm_base_ptr(cfp);
2608}
2609
2610/* method call processes with call_info */
2611
2612#include "vm_args.c"
2613
2614static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2615ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2616static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2617static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2618static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2619static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2620static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2621
2622static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2623
2624static VALUE
2625vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2626{
2627 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2628
2629 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2630}
2631
2632static VALUE
2633vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2634{
2635 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2636
2637 const struct rb_callcache *cc = calling->cc;
2638 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2639 int param = ISEQ_BODY(iseq)->param.size;
2640 int local = ISEQ_BODY(iseq)->local_table_size;
2641 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2642}
2643
2644bool
2645rb_simple_iseq_p(const rb_iseq_t *iseq)
2646{
2647 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2648 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2649 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2650 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2651 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2652 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2653 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2654 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2655}
2656
2657bool
2658rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2659{
2660 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2661 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2662 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2663 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2664 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2665 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2666 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2667 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2668}
2669
2670bool
2671rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2672{
2673 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2674 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2675 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2676 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2677 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2678 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2679 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2680}
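/* Ruby-level illustration of the three predicates above (illustrative):
 *
 *   def a(x, y);         end   # rb_simple_iseq_p        -> true
 *   def b(x, y = 1);     end   # rb_iseq_only_optparam_p -> true
 *   def c(x, k: 1);      end   # rb_iseq_only_kwparam_p  -> true
 *   def d(x, *r, &blk);  end   # none: rest/block disqualify all three
 */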
2681
2682#define ALLOW_HEAP_ARGV (-2)
2683#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2684
2685static inline bool
2686vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2687{
2688 vm_check_canary(GET_EC(), cfp->sp);
2689 bool ret = false;
2690
2691 if (!NIL_P(ary)) {
2692 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2693 long len = RARRAY_LEN(ary);
2694 int argc = calling->argc;
2695
2696 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2697 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2698 * a temporary array, instead of trying to keep the arguments on the VM stack.
2699 */
2700 VALUE *argv = cfp->sp - argc;
2701 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2702 rb_ary_cat(argv_ary, argv, argc);
2703 rb_ary_cat(argv_ary, ptr, len);
2704 cfp->sp -= argc - 1;
2705 cfp->sp[-1] = argv_ary;
2706 calling->argc = 1;
2707 calling->heap_argv = argv_ary;
2708 RB_GC_GUARD(ary);
2709 }
2710 else {
2711 long i;
2712
2713 if (max_args >= 0 && len + argc > max_args) {
2714 /* If only a given max_args is allowed, copy up to max args.
2715 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2716 * where additional arguments are ignored.
2717 *
2718 * Also, copy up to one more argument than the maximum,
2719 * in case it is an empty keyword hash that will be removed.
2720 */
2721 calling->argc += len - (max_args - argc + 1);
2722 len = max_args - argc + 1;
2723 ret = true;
2724 }
2725 else {
2726 /* Unset heap_argv if it was originally set. This can happen when
2727 * forwarding modified arguments, where heap_argv was used for the
2728 * original call but is not supported by the forwarded method in
2729 * all cases.
2730 */
2731 calling->heap_argv = 0;
2732 }
2733 CHECK_VM_STACK_OVERFLOW(cfp, len);
2734
2735 for (i = 0; i < len; i++) {
2736 *cfp->sp++ = ptr[i];
2737 }
2738 calling->argc += i;
2739 }
2740 }
2741
2742 return ret;
2743}
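/* Ruby-level illustration of the ALLOW_HEAP_ARGV path above: a call like
 *
 *   foo(*huge_ary)    # huge_ary.size + argc > VM_ARGC_STACK_MAX
 *
 * would risk overflowing the VM stack if expanded in place, so the
 * arguments are collected into one hidden Array and passed as
 * calling->heap_argv with calling->argc == 1.
 */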
2744
2745static inline void
2746vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2747{
2748 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2749 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2750 const VALUE h = rb_hash_new_with_size(kw_len);
2751 VALUE *sp = cfp->sp;
2752 int i;
2753
2754 for (i=0; i<kw_len; i++) {
2755 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2756 }
2757 (sp-kw_len)[0] = h;
2758
2759 cfp->sp -= kw_len - 1;
2760 calling->argc -= kw_len - 1;
2761 calling->kw_splat = 1;
2762}
2763
2764static inline VALUE
2765vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2766{
2767 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2768 if (keyword_hash != Qnil) {
2769 /* Convert a non-hash keyword splat to a new hash */
2770 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2771 }
2772 }
2773 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2774 /* Convert a hash keyword splat to a new hash unless
2775 * a mutable keyword splat was passed.
2776 * Skip allocating new hash for empty keyword splat, as empty
2777 * keyword splat will be ignored by both callers.
2778 */
2779 keyword_hash = rb_hash_dup(keyword_hash);
2780 }
2781 return keyword_hash;
2782}
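/* Ruby-level illustration of the dup above: the hash given to a keyword
 * splat is copied before use, so callee-side handling can never mutate the
 * caller's hash:
 *
 *   h = { k: 1 }
 *   obj.m(**h)    # the VM forwards a copy of h, not h itself
 *
 * The copy is skipped for an empty splat (it is dropped anyway) and when
 * the CI already marks the splat hash as mutable (IS_ARGS_KW_SPLAT_MUT).
 */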
2783
2784static inline void
2785CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2786 struct rb_calling_info *restrict calling,
2787 const struct rb_callinfo *restrict ci, int max_args)
2788{
2789 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2790 if (IS_ARGS_KW_SPLAT(ci)) {
2791 // f(*a, **kw)
2792 VM_ASSERT(calling->kw_splat == 1);
2793
2794 cfp->sp -= 2;
2795 calling->argc -= 2;
2796 VALUE ary = cfp->sp[0];
2797 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2798
2799 // splat a
2800 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2801
2802 // put kw
2803 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2804 if (UNLIKELY(calling->heap_argv)) {
2805 rb_ary_push(calling->heap_argv, kwh);
2806 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2807 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2808 calling->kw_splat = 0;
2809 }
2810 }
2811 else {
2812 cfp->sp[0] = kwh;
2813 cfp->sp++;
2814 calling->argc++;
2815
2816 VM_ASSERT(calling->kw_splat == 1);
2817 }
2818 }
2819 else {
2820 calling->kw_splat = 0;
2821 }
2822 }
2823 else {
2824 // f(*a)
2825 VM_ASSERT(calling->kw_splat == 0);
2826
2827 cfp->sp -= 1;
2828 calling->argc -= 1;
2829 VALUE ary = cfp->sp[0];
2830
2831 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2832 goto check_keyword;
2833 }
2834
2835 // check the last argument
2836 VALUE last_hash, argv_ary;
2837 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2838 if (!IS_ARGS_KEYWORD(ci) &&
2839 RARRAY_LEN(argv_ary) > 0 &&
2840 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2841 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2842
2843 rb_ary_pop(argv_ary);
2844 if (!RHASH_EMPTY_P(last_hash)) {
2845 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2846 calling->kw_splat = 1;
2847 }
2848 }
2849 }
2850 else {
2851check_keyword:
2852 if (!IS_ARGS_KEYWORD(ci) &&
2853 calling->argc > 0 &&
2854 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2855 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2856
2857 if (RHASH_EMPTY_P(last_hash)) {
2858 calling->argc--;
2859 cfp->sp -= 1;
2860 }
2861 else {
2862 cfp->sp[-1] = rb_hash_dup(last_hash);
2863 calling->kw_splat = 1;
2864 }
2865 }
2866 }
2867 }
2868 }
2869 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2870 // f(**kw)
2871 VM_ASSERT(calling->kw_splat == 1);
2872 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2873
2874 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2875 cfp->sp--;
2876 calling->argc--;
2877 calling->kw_splat = 0;
2878 }
2879 else {
2880 cfp->sp[-1] = kwh;
2881 }
2882 }
2883 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2884 // f(k1:1, k2:2)
2885 VM_ASSERT(calling->kw_splat == 0);
2886
2887 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2888 * by creating a keyword hash.
2889 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2890 */
2891 vm_caller_setup_arg_kw(cfp, calling, ci);
2892 }
2893}
2894
2895#define USE_OPT_HIST 0
2896
2897#if USE_OPT_HIST
2898#define OPT_HIST_MAX 64
2899static int opt_hist[OPT_HIST_MAX+1];
2900
2901__attribute__((destructor))
2902static void
2903opt_hist_show_results_at_exit(void)
2904{
2905 for (int i=0; i<OPT_HIST_MAX; i++) {
2906 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2907 }
2908}
2909#endif
2910
2911static VALUE
2912vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2913 struct rb_calling_info *calling)
2914{
2915 const struct rb_callcache *cc = calling->cc;
2916 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2917 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2918 const int opt = calling->argc - lead_num;
2919 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2920 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2921 const int param = ISEQ_BODY(iseq)->param.size;
2922 const int local = ISEQ_BODY(iseq)->local_table_size;
2923 const int delta = opt_num - opt;
2924
2925 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2926
2927#if USE_OPT_HIST
2928 if (opt_pc < OPT_HIST_MAX) {
2929 opt_hist[opt]++;
2930 }
2931 else {
2932 opt_hist[OPT_HIST_MAX]++;
2933 }
2934#endif
2935
2936 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2937}
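/* Worked example for the opt_table indexing above (illustrative):
 *
 *   def m(a, b = 1, c = 2); end   # lead_num:1, opt_num:2
 *
 *   m(9)        -> opt = 0, enter at opt_table[0], evaluate both defaults
 *   m(9, 8)     -> opt = 1, enter at opt_table[1], evaluate only c's default
 *   m(9, 8, 7)  -> opt = 2, enter at opt_table[2], skip all defaults
 *
 * `param - delta` sizes the frame for the parameters actually supplied; the
 * default-evaluating prologue fills in the rest as it runs.
 */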
2938
2939static VALUE
2940vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2941 struct rb_calling_info *calling)
2942{
2943 const struct rb_callcache *cc = calling->cc;
2944 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2945 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2946 const int opt = calling->argc - lead_num;
2947 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2948
2949 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2950
2951#if USE_OPT_HIST
2952 if (opt_pc < OPT_HIST_MAX) {
2953 opt_hist[opt]++;
2954 }
2955 else {
2956 opt_hist[OPT_HIST_MAX]++;
2957 }
2958#endif
2959
2960 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2961}
2962
2963static void
2964args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
2965 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2966 VALUE *const locals);
2967
2968static VALUE
2969vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2970 struct rb_calling_info *calling)
2971{
2972 const struct rb_callcache *cc = calling->cc;
2973 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2974 int param_size = ISEQ_BODY(iseq)->param.size;
2975 int local_size = ISEQ_BODY(iseq)->local_table_size;
2976
2977 // Setting up local size and param size
2978 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2979
2980 local_size = local_size + vm_ci_argc(calling->cd->ci);
2981 param_size = param_size + vm_ci_argc(calling->cd->ci);
2982
2983 cfp->sp[0] = (VALUE)calling->cd->ci;
2984
2985 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2986}
2987
2988static VALUE
2989vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2990 struct rb_calling_info *calling)
2991{
2992 const struct rb_callinfo *ci = calling->cd->ci;
2993 const struct rb_callcache *cc = calling->cc;
2994
2995 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2996 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2997
2998 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2999 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3000 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3001 const int ci_kw_len = kw_arg->keyword_len;
3002 const VALUE * const ci_keywords = kw_arg->keywords;
3003 VALUE *argv = cfp->sp - calling->argc;
3004 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3005 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3006 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3007 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3008 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3009
3010 int param = ISEQ_BODY(iseq)->param.size;
3011 int local = ISEQ_BODY(iseq)->local_table_size;
3012 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3013}
3014
3015static VALUE
3016vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3017 struct rb_calling_info *calling)
3018{
3019 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3020 const struct rb_callcache *cc = calling->cc;
3021
3022 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3023 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3024
3025 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3026 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3027 VALUE * const argv = cfp->sp - calling->argc;
3028 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3029
3030 int i;
3031 for (i=0; i<kw_param->num; i++) {
3032 klocals[i] = kw_param->default_values[i];
3033 }
3034 klocals[i] = INT2FIX(0); // kw specify flag
3035 // NOTE:
3036 // nobody checks this value, but it should be cleared because it can
3037 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3038
3039 int param = ISEQ_BODY(iseq)->param.size;
3040 int local = ISEQ_BODY(iseq)->local_table_size;
3041 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3042}
3043
3044static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3045
3046static VALUE
3047vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3048 struct rb_calling_info *calling)
3049{
3050 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3051 cfp->sp -= (calling->argc + 1);
3052 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3053 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3054}
3055
3056VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3057
3058static void
3059warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3060{
3061 rb_vm_t *vm = GET_VM();
3062 set_table *dup_check_table = vm->unused_block_warning_table;
3063 st_data_t key;
3064 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3065
3066 union {
3067 VALUE v;
3068 unsigned char b[SIZEOF_VALUE];
3069 } k1 = {
3070 .v = (VALUE)pc,
3071 }, k2 = {
3072 .v = (VALUE)cme->def,
3073 };
3074
3075 // relaxed check: key only on the method id
3076 if (!strict_unused_block) {
3077 key = (st_data_t)cme->def->original_id;
3078
3079 if (set_table_lookup(dup_check_table, key)) {
3080 return;
3081 }
3082 }
3083
3084 // strict check
3085 // make a unique key from the pc and the cme->def pointer
3086 key = 0;
3087 for (int i=0; i<SIZEOF_VALUE; i++) {
3088 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3089 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3090 }
3091
3092 if (0) {
3093 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3094 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3095 fprintf(stderr, "key:%p\n", (void *)key);
3096 }
3097
3098 // duplication check
3099 if (set_insert(dup_check_table, key)) {
3100 // already shown
3101 }
3102 else if (RTEST(ruby_verbose) || strict_unused_block) {
3103 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3104 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3105
3106 if (!NIL_P(m_loc)) {
3107 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3108 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3109 }
3110 else {
3111 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3112 }
3113 }
3114}
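/* Ruby-level illustration: the warning above fires under -w (or with the
 * strict_unused_block warning category enabled) for call sites like
 *
 *   def foo; end        # neither yields nor takes a block parameter
 *   foo { :ignored }    # "the block passed to 'foo' ... may be ignored"
 *
 * The (pc ^ cme->def) key built above de-duplicates the strict warning per
 * call site and method, so each pair warns at most once.
 */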
3115
3116static inline int
3117vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3118 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3119{
3120 const struct rb_callinfo *ci = calling->cd->ci;
3121 const struct rb_callcache *cc = calling->cc;
3122
3123 VM_ASSERT((vm_ci_argc(ci), 1));
3124 VM_ASSERT(vm_cc_cme(cc) != NULL);
3125
3126 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3127 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3128 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3129 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3130 }
3131
3132 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3133 if (LIKELY(rb_simple_iseq_p(iseq))) {
3134 rb_control_frame_t *cfp = ec->cfp;
3135 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3136 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3137
3138 if (calling->argc != lead_num) {
3139 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3140 }
3141
3142 //VM_ASSERT(ci == calling->cd->ci);
3143 VM_ASSERT(cc == calling->cc);
3144
3145 if (vm_call_iseq_optimizable_p(ci, cc)) {
3146 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3147 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3148 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3149 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3150 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3151 }
3152 else {
3153 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3154 }
3155 }
3156 return 0;
3157 }
3158 else if (rb_iseq_only_optparam_p(iseq)) {
3159 rb_control_frame_t *cfp = ec->cfp;
3160
3161 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3162 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3163
3164 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3165 const int argc = calling->argc;
3166 const int opt = argc - lead_num;
3167
3168 if (opt < 0 || opt > opt_num) {
3169 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3170 }
3171
3172 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3173 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3174 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3175 vm_call_cacheable(ci, cc));
3176 }
3177 else {
3178 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3179 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3180 vm_call_cacheable(ci, cc));
3181 }
3182
3183 /* initialize opt vars for self-references */
3184 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3185 for (int i=argc; i<lead_num + opt_num; i++) {
3186 argv[i] = Qnil;
3187 }
3188 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3189 }
3190 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3191 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3192 const int argc = calling->argc;
3193 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3194
3195 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3196 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3197
3198 if (argc - kw_arg->keyword_len == lead_num) {
3199 const int ci_kw_len = kw_arg->keyword_len;
3200 const VALUE * const ci_keywords = kw_arg->keywords;
3201 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3202 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3203
3204 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3205 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3206
3207 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3208 vm_call_cacheable(ci, cc));
3209
3210 return 0;
3211 }
3212 }
3213 else if (argc == lead_num) {
3214 /* no kwarg */
3215 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3216 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3217
3218 if (klocals[kw_param->num] == INT2FIX(0)) {
3219 /* copy from default_values */
3220 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3221 vm_call_cacheable(ci, cc));
3222 }
3223
3224 return 0;
3225 }
3226 }
3227 }
3228
3229 // Called iseq is using ... param
3230 // def foo(...) # <- iseq for foo will have "forwardable"
3231 //
3232 // We want to set the `...` local to the caller's CI
3233 // foo(1, 2) # <- the ci for this should end up as `...`
3234 //
3235 // So hopefully the stack looks like:
3236 //
3237 // => 1
3238 // => 2
3239 // => *
3240 // => **
3241 // => &
3242 // => ... # <- points at `foo`s CI
3243 // => cref_or_me
3244 // => specval
3245 // => type
3246 //
3247 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3248 bool can_fastpath = true;
3249
3250 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3251 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3252 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3253 ci = vm_ci_new_runtime(
3254 vm_ci_mid(ci),
3255 vm_ci_flag(ci),
3256 vm_ci_argc(ci),
3257 vm_ci_kwarg(ci));
3258 }
3259 else {
3260 ci = forward_cd->caller_ci;
3261 }
3262 can_fastpath = false;
3263 }
3264 // C functions calling iseqs will stack-allocate a CI,
3265 // so we need to convert it to a heap-allocated one
3266 if (!vm_ci_markable(ci)) {
3267 ci = vm_ci_new_runtime(
3268 vm_ci_mid(ci),
3269 vm_ci_flag(ci),
3270 vm_ci_argc(ci),
3271 vm_ci_kwarg(ci));
3272 can_fastpath = false;
3273 }
3274 argv[param_size - 1] = (VALUE)ci;
3275 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3276 return 0;
3277 }
3278
3279 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3280}
3281
3282static void
3283vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3284{
3285 // This case is when the caller is using a ... parameter.
3286 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3287 // In this case the caller's caller's CI will be on the stack.
3288 //
3289 // For example:
3290 //
3291 // def bar(a, b); a + b; end
3292 // def foo(...); bar(...); end
3293 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3294 //
3295 // Stack layout will be:
3296 //
3297 // > 1
3298 // > 2
3299 // > CI for foo(1, 2)
3300 // > cref_or_me
3301 // > specval
3302 // > type
3303 // > receiver
3304 // > CI for foo(1, 2), via `getlocal ...`
3305 // > ( SP points here )
3306 const VALUE * lep = VM_CF_LEP(cfp);
3307
3308 const rb_iseq_t *iseq;
3309
3310 // If we're in an escaped environment (lambda for example), get the iseq
3311 // from the captured env.
3312 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3313 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3314 iseq = env->iseq;
3315 }
3316 else { // Otherwise use the lep to find the caller
3317 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3318 }
3319
3320 // Our local storage is below the args we need to copy
3321 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3322
3323 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3324 VALUE * to = cfp->sp - 1; // clobber the CI
3325
3326 if (RTEST(splat)) {
3327 to -= 1; // clobber the splat array
3328 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3329 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3330 to += RARRAY_LEN(splat);
3331 }
3332
3333 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3334 MEMCPY(to, from, VALUE, argc);
3335 cfp->sp = to + argc;
3336
3337 // Stack layout should now be:
3338 //
3339 // > 1
3340 // > 2
3341 // > CI for foo(1, 2)
3342 // > cref_or_me
3343 // > specval
3344 // > type
3345 // > receiver
3346 // > 1
3347 // > 2
3348 // > ( SP points here )
3349}
3350
3351static VALUE
3352vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3353{
3354 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3355
3356 const struct rb_callcache *cc = calling->cc;
3357 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3358 int param_size = ISEQ_BODY(iseq)->param.size;
3359 int local_size = ISEQ_BODY(iseq)->local_table_size;
3360
3361 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3362
3363 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3364 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3365}
3366
3367static VALUE
3368vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3369{
3370 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3371
3372 const struct rb_callcache *cc = calling->cc;
3373 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3374 int param_size = ISEQ_BODY(iseq)->param.size;
3375 int local_size = ISEQ_BODY(iseq)->local_table_size;
3376
3377 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3378
3379 // Setting up local size and param size
3380 local_size = local_size + vm_ci_argc(calling->cd->ci);
3381 param_size = param_size + vm_ci_argc(calling->cd->ci);
3382
3383 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3384 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3385}
3386
3387static inline VALUE
3388vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3389 int opt_pc, int param_size, int local_size)
3390{
3391 const struct rb_callinfo *ci = calling->cd->ci;
3392 const struct rb_callcache *cc = calling->cc;
3393
3394 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3395 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3396 }
3397 else {
3398 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3399 }
3400}
3401
3402static inline VALUE
3403vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3404 int opt_pc, int param_size, int local_size)
3405{
3406 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3407 VALUE *argv = cfp->sp - calling->argc;
3408 VALUE *sp = argv + param_size;
3409 cfp->sp = argv - 1 /* recv */;
3410
3411 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3412 calling->block_handler, (VALUE)me,
3413 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3414 local_size - param_size,
3415 ISEQ_BODY(iseq)->stack_max);
3416 return Qundef;
3417}
3418
3419static inline VALUE
3420vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3421{
3422 const struct rb_callcache *cc = calling->cc;
3423 unsigned int i;
3424 VALUE *argv = cfp->sp - calling->argc;
3425 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3426 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3427 VALUE *src_argv = argv;
3428 VALUE *sp_orig, *sp;
3429 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3430
3431 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3432 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3433 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3434 dst_captured->code.val = src_captured->code.val;
3435 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3436 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3437 }
3438 else {
3439 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3440 }
3441 }
3442
3443 vm_pop_frame(ec, cfp, cfp->ep);
3444 cfp = ec->cfp;
3445
3446 sp_orig = sp = cfp->sp;
3447
3448 /* push self */
3449 sp[0] = calling->recv;
3450 sp++;
3451
3452 /* copy arguments */
3453 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3454 *sp++ = src_argv[i];
3455 }
3456
3457 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3458 calling->recv, calling->block_handler, (VALUE)me,
3459 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3460 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3461 ISEQ_BODY(iseq)->stack_max);
3462
3463 cfp->sp = sp_orig;
3464
3465 return Qundef;
3466}
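/* Ruby-level illustration of the tailcall path above (requires compiling
 * with the `tailcall_optimization` iseq option): for a call in tail
 * position such as
 *
 *   def countdown(n)
 *     n.zero? ? :done : countdown(n - 1)
 *   end
 *
 * the caller's frame is popped (vm_pop_frame) before the callee's frame is
 * pushed over it, so recursion depth stays O(1) instead of O(n).
 */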
3467
3468static void
3469ractor_unsafe_check(void)
3470{
3471 if (!rb_ractor_main_p()) {
3472 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from a non-main ractor");
3473 }
3474}
3475
3476static VALUE
3477call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3478{
3479 ractor_unsafe_check();
3480 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3481 return (*f)(recv, rb_ary_new4(argc, argv));
3482}
3483
3484static VALUE
3485call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3486{
3487 ractor_unsafe_check();
3488 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3489 return (*f)(argc, argv, recv);
3490}
3491
3492static VALUE
3493call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3494{
3495 ractor_unsafe_check();
3496 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3497 return (*f)(recv);
3498}
3499
3500static VALUE
3501call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3502{
3503 ractor_unsafe_check();
3504 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3505 return (*f)(recv, argv[0]);
3506}
3507
3508static VALUE
3509call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3510{
3511 ractor_unsafe_check();
3512 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3513 return (*f)(recv, argv[0], argv[1]);
3514}
3515
3516static VALUE
3517call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3518{
3519 ractor_unsafe_check();
3520 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3521 return (*f)(recv, argv[0], argv[1], argv[2]);
3522}
3523
3524static VALUE
3525call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3526{
3527 ractor_unsafe_check();
3528 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3529 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3530}
3531
3532static VALUE
3533call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3534{
3535 ractor_unsafe_check();
3536 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3537 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3538}
3539
3540static VALUE
3541call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3542{
3543 ractor_unsafe_check();
3544 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3545 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3546}
3547
3548static VALUE
3549call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3550{
3551 ractor_unsafe_check();
3552 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3553 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3554}
3555
3556static VALUE
3557call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3558{
3559 ractor_unsafe_check();
3560 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3561 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3562}
3563
3564static VALUE
3565call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3566{
3567 ractor_unsafe_check();
3568 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3569 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3570}
3571
3572static VALUE
3573call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3574{
3575 ractor_unsafe_check();
3576 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3577 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3578}
3579
3580static VALUE
3581call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3582{
3583 ractor_unsafe_check();
3584 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3585 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3586}
3587
3588static VALUE
3589call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3590{
3591 ractor_unsafe_check();
3592 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3593 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3594}
3595
3596static VALUE
3597call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3598{
3599 ractor_unsafe_check();
3600 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3601 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3602}
3603
3604static VALUE
3605call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3606{
3607 ractor_unsafe_check();
3608 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3609 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3610}
3611
3612static VALUE
3613call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3614{
3615 ractor_unsafe_check();
3616 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3617 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3618}
3619
3620static VALUE
3621ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3622{
3623 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3624 return (*f)(recv, rb_ary_new4(argc, argv));
3625}
3626
3627static VALUE
3628ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3629{
3630 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3631 return (*f)(argc, argv, recv);
3632}
3633
3634static VALUE
3635ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3636{
3637 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3638 return (*f)(recv);
3639}
3640
3641static VALUE
3642ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643{
3644 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3645 return (*f)(recv, argv[0]);
3646}
3647
3648static VALUE
3649ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3650{
3651 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3652 return (*f)(recv, argv[0], argv[1]);
3653}
3654
3655static VALUE
3656ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657{
3658 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3659 return (*f)(recv, argv[0], argv[1], argv[2]);
3660}
3661
3662static VALUE
3663ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3664{
3665 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3666 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3667}
3668
3669static VALUE
3670ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3671{
3672 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3673 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3674}
3675
3676static VALUE
3677ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3678{
3679 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3680 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3681}
3682
3683static VALUE
3684ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3685{
3686 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3687 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3688}
3689
3690static VALUE
3691ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3692{
3693 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3694 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3695}
3696
3697static VALUE
3698ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3699{
3700 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3701 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3702}
3703
3704static VALUE
3705ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3706{
3707 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3708 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3709}
3710
3711static VALUE
3712ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3713{
3714 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3715 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3716}
3717
3718static VALUE
3719ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3720{
3721 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3722 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3723}
3724
3725static VALUE
3726ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3727{
3728 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3729 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3730}
3731
3732static VALUE
3733ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3734{
3735 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3736 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3737}
3738
3739static VALUE
3740ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3741{
3742 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3743 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3744}
3745
3746static inline int
3747vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3748{
3749 const int ov_flags = RAISED_STACKOVERFLOW;
3750 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3751 if (rb_ec_raised_p(ec, ov_flags)) {
3752 rb_ec_raised_reset(ec, ov_flags);
3753 return TRUE;
3754 }
3755 return FALSE;
3756}
3757
3758#define CHECK_CFP_CONSISTENCY(func) \
3759 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3760 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3761
3762static inline
3763const rb_method_cfunc_t *
3764vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3765{
3766#if VM_DEBUG_VERIFY_METHOD_CACHE
3767 switch (me->def->type) {
3768 case VM_METHOD_TYPE_CFUNC:
3769 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3770 break;
3771# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3772 METHOD_BUG(ISEQ);
3773 METHOD_BUG(ATTRSET);
3774 METHOD_BUG(IVAR);
3775 METHOD_BUG(BMETHOD);
3776 METHOD_BUG(ZSUPER);
3777 METHOD_BUG(UNDEF);
3778 METHOD_BUG(OPTIMIZED);
3779 METHOD_BUG(MISSING);
3780 METHOD_BUG(REFINED);
3781 METHOD_BUG(ALIAS);
3782# undef METHOD_BUG
3783 default:
3784 rb_bug("wrong method type: %d", me->def->type);
3785 }
3786#endif
3787 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3788}
3789
3790static VALUE
3791vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3792 int argc, VALUE *argv, VALUE *stack_bottom)
3793{
3794 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3795 const struct rb_callinfo *ci = calling->cd->ci;
3796 const struct rb_callcache *cc = calling->cc;
3797 VALUE val;
3798 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3799 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3800
3801 VALUE recv = calling->recv;
3802 VALUE block_handler = calling->block_handler;
3803 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3804
3805 if (UNLIKELY(calling->kw_splat)) {
3806 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3807 }
3808
3809 VM_ASSERT(reg_cfp == ec->cfp);
3810
3811 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3812 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3813
3814 vm_push_frame(ec, NULL, frame_type, recv,
3815 block_handler, (VALUE)me,
3816 0, ec->cfp->sp, 0, 0);
3817
3818 int len = cfunc->argc;
3819 if (len >= 0) rb_check_arity(argc, len, len);
3820
3821 reg_cfp->sp = stack_bottom;
3822 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3823
3824 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3825
3826 rb_vm_pop_frame(ec);
3827
3828 VM_ASSERT(ec->cfp->sp == stack_bottom);
3829
3830 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3831 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3832
3833 return val;
3834}
3835
3836// Push a C method frame for a given cme. This is called when JIT code skipped
3837// pushing a frame but the C method reached a point where a frame is needed.
3838void
3839rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3840{
3841 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3842 rb_execution_context_t *ec = GET_EC();
3843 VALUE *sp = ec->cfp->sp;
3844 VALUE recv = *(sp - recv_idx - 1);
3845 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3846 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3847#if VM_CHECK_MODE > 0
3848 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3849 *(GET_EC()->cfp->sp) = Qfalse;
3850#endif
3851 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3852}
3853
3854// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3855bool
3856rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3857{
3858 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3859}
3860
3861static VALUE
3862vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3863{
3864 int argc = calling->argc;
3865 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3866 VALUE *argv = &stack_bottom[1];
3867
3868 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3869}
3870
3871static VALUE
3872vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3873{
3874 const struct rb_callinfo *ci = calling->cd->ci;
3875 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3876
3877 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3878 VALUE argv_ary;
3879 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3880 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3881 int argc = RARRAY_LENINT(argv_ary);
3882 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3883 VALUE *stack_bottom = reg_cfp->sp - 2;
3884
3885 VM_ASSERT(calling->argc == 1);
3886 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3887 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3888
3889 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3890 }
3891 else {
3892 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3893
3894 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3895 }
3896}
3897
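/* For a splat call like f(*ary) whose elements fit on the VM stack
 * (argc <= VM_ARGC_STACK_MAX), copy the elements out of the array so the plain
 * stack-based cfunc path can run; larger splats fall back to vm_call_cfunc_other
 * and its heap argv. */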
3898static inline VALUE
3899vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3900{
3901 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3902 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3903
3904 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3905 return vm_call_cfunc_other(ec, reg_cfp, calling);
3906 }
3907
3908 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3909 calling->kw_splat = 0;
3910 int i;
3911 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3912 VALUE *sp = stack_bottom;
3913 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3914 for (i = 0; i < argc; i++) {
3915 *++sp = argv[i];
3916 }
3917 reg_cfp->sp = sp+1;
3918
3919 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3920}
3921
3922static inline VALUE
3923vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3924{
3925 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3926 VALUE argv_ary = reg_cfp->sp[-1];
3927 int argc = RARRAY_LENINT(argv_ary);
3928 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3929 VALUE last_hash;
3930 int argc_offset = 0;
3931
3932 if (UNLIKELY(argc > 0 &&
3933 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3934 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3935 if (!RHASH_EMPTY_P(last_hash)) {
3936 return vm_call_cfunc_other(ec, reg_cfp, calling);
3937 }
3938 argc_offset++;
3939 }
3940 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3941}
3942
3943static inline VALUE
3944vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3945{
3946 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3947 VALUE keyword_hash = reg_cfp->sp[-1];
3948
3949 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3950 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3951 }
3952
3953 return vm_call_cfunc_other(ec, reg_cfp, calling);
3954}
3955
3956static VALUE
3957vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3958{
3959 const struct rb_callinfo *ci = calling->cd->ci;
3960 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3961
3962 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3963 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3964 // f(*a)
3965 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3966 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3967 }
3968 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3969 // f(*a, **kw)
3970 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3971 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3972 }
3973 }
3974
3975 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3976 return vm_call_cfunc_other(ec, reg_cfp, calling);
3977}
3978
3979static VALUE
3980vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3981{
3982 const struct rb_callcache *cc = calling->cc;
3983 RB_DEBUG_COUNTER_INC(ccf_ivar);
3984 cfp->sp -= 1;
3985 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3986 return ivar;
3987}
3988
3989static VALUE
3990vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3991{
3992 RB_DEBUG_COUNTER_INC(ccf_attrset);
3993 VALUE val = *(cfp->sp - 1);
3994 cfp->sp -= 2;
3995 attr_index_t index;
3996 shape_id_t dest_shape_id;
3997 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
3998 ID id = vm_cc_cme(cc)->def->body.attr.id;
3999 rb_check_frozen(obj);
4000 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4001 if (UNDEF_P(res)) {
4002 switch (BUILTIN_TYPE(obj)) {
4003 case T_OBJECT:
4004 case T_CLASS:
4005 case T_MODULE:
4006 break;
4007 default:
4008 {
4009 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4010 if (!UNDEF_P(res)) {
4011 return res;
4012 }
4013 }
4014 }
4015 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4016 }
4017 return res;
4018}
4019
4020static VALUE
4021vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4022{
4023 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4024}
4025
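/* bmethod: a method defined from a Proc, e.g. define_method(:m) { ... }. Invoking
 * it re-enters the proc with the method's receiver as self. A non-shareable proc
 * may only be invoked from the Ractor it was defined in, hence the defined_ractor
 * check below. */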
4026static inline VALUE
4027vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4028{
4029 rb_proc_t *proc;
4030 VALUE val;
4031 const struct rb_callcache *cc = calling->cc;
4032 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4033 VALUE procv = cme->def->body.bmethod.proc;
4034
4035 if (!RB_OBJ_SHAREABLE_P(procv) &&
4036 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4037 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4038 }
4039
4040 /* control block frame */
4041 GetProcPtr(procv, proc);
4042 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4043
4044 return val;
4045}
4046
4047static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4048
4049static VALUE
4050vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4051{
4052 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4053
4054 const struct rb_callcache *cc = calling->cc;
4055 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4056 VALUE procv = cme->def->body.bmethod.proc;
4057
4058 if (!RB_OBJ_SHAREABLE_P(procv) &&
4059 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4060 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4061 }
4062
4063 rb_proc_t *proc;
4064 GetProcPtr(procv, proc);
4065 const struct rb_block *block = &proc->block;
4066
4067 while (vm_block_type(block) == block_type_proc) {
4068 block = vm_proc_block(block->as.proc);
4069 }
4070 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4071
4072 const struct rb_captured_block *captured = &block->as.captured;
4073 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4074 VALUE * const argv = cfp->sp - calling->argc;
4075 const int arg_size = ISEQ_BODY(iseq)->param.size;
4076
4077 int opt_pc;
4078 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4079 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4080 }
4081 else {
4082 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4083 }
4084
4085 cfp->sp = argv - 1; // -1 for the receiver
4086
4087 vm_push_frame(ec, iseq,
4088 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4089 calling->recv,
4090 VM_GUARDED_PREV_EP(captured->ep),
4091 (VALUE)cme,
4092 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4093 argv + arg_size,
4094 ISEQ_BODY(iseq)->local_table_size - arg_size,
4095 ISEQ_BODY(iseq)->stack_max);
4096
4097 return Qundef;
4098}
4099
4100static VALUE
4101vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4102{
4103 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4104
4105 VALUE *argv;
4106 int argc;
4107 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4108 if (UNLIKELY(calling->heap_argv)) {
4109 argv = RARRAY_PTR(calling->heap_argv);
4110 cfp->sp -= 2;
4111 }
4112 else {
4113 argc = calling->argc;
4114 argv = ALLOCA_N(VALUE, argc);
4115 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4116 cfp->sp += - argc - 1;
4117 }
4118
4119 return vm_call_bmethod_body(ec, calling, argv);
4120}
4121
4122static VALUE
4123vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4124{
4125 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4126
4127 const struct rb_callcache *cc = calling->cc;
4128 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4129 VALUE procv = cme->def->body.bmethod.proc;
4130 rb_proc_t *proc;
4131 GetProcPtr(procv, proc);
4132 const struct rb_block *block = &proc->block;
4133
4134 while (vm_block_type(block) == block_type_proc) {
4135 block = vm_proc_block(block->as.proc);
4136 }
4137 if (vm_block_type(block) == block_type_iseq) {
4138 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4139 return vm_call_iseq_bmethod(ec, cfp, calling);
4140 }
4141
4142 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4143 return vm_call_noniseq_bmethod(ec, cfp, calling);
4144}
4145
4146VALUE
4147rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4148{
4149 VALUE klass = current_class;
4150
4151 /* for a prepended Module, start from the covering class */
4152 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4153 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4154 klass = RBASIC_CLASS(klass);
4155 }
4156
4157 while (RTEST(klass)) {
4158 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4159 if (owner == target_owner) {
4160 return klass;
4161 }
4162 klass = RCLASS_SUPER(klass);
4163 }
4164
4165 return current_class; /* maybe module function */
4166}
4167
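/* Resolve an alias to its original method entry. If the original entry has no
 * defined_class (it came from a module), complement it with the class located by
 * rb_find_defined_class_by_owner, and cache the result back into the alias
 * definition when that definition is not shared. */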
4168static const rb_callable_method_entry_t *
4169aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4170{
4171 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4172 const rb_callable_method_entry_t *cme;
4173
4174 if (orig_me->defined_class == 0) {
4175 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4176 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4177 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4178
4179 if (me->def->reference_count == 1) {
4180 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4181 }
4182 else {
4183 rb_method_definition_t *def =
4184 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4185 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4186 }
4187 }
4188 else {
4189 cme = (const rb_callable_method_entry_t *)orig_me;
4190 }
4191
4192 VM_ASSERT(callable_method_entry_p(cme));
4193 return cme;
4194}
4195
4196const rb_callable_method_entry_t *
4197rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4198{
4199 return aliased_callable_method_entry(me);
4200}
4201
4202static VALUE
4203vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4204{
4205 calling->cc = &VM_CC_ON_STACK(Qundef,
4206 vm_call_general,
4207 {{0}},
4208 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4209
4210 return vm_call_method_each_type(ec, cfp, calling);
4211}
4212
4213static enum method_missing_reason
4214ci_missing_reason(const struct rb_callinfo *ci)
4215{
4216 enum method_missing_reason stat = MISSING_NOENTRY;
4217 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4218 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4219 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4220 return stat;
4221}
4222
4223static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4224
4225static VALUE
4226vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4227 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4228{
4229 ASSUME(calling->argc >= 0);
4230
4231 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4232 int argc = calling->argc;
4233 VALUE recv = calling->recv;
4234 VALUE klass = CLASS_OF(recv);
4235 ID mid = rb_check_id(&symbol);
4236 flags |= VM_CALL_OPT_SEND;
4237
4238 if (UNLIKELY(! mid)) {
4239 mid = idMethodMissing;
4240 missing_reason = ci_missing_reason(ci);
4241 ec->method_missing_reason = missing_reason;
4242
4243 VALUE argv_ary;
4244 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4245 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4246 rb_ary_unshift(argv_ary, symbol);
4247
4248 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4249 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4250 VALUE exc = rb_make_no_method_exception(
4251 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4252
4253 rb_exc_raise(exc);
4254 }
4255 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4256 }
4257 else {
4258 /* E.g. when argc == 2
4259 *
4260 * | | | | TOPN
4261 * | | +------+
4262 * | | +---> | arg1 | 0
4263 * +------+ | +------+
4264 * | arg1 | -+ +-> | arg0 | 1
4265 * +------+ | +------+
4266 * | arg0 | ---+ | sym | 2
4267 * +------+ +------+
4268 * | recv | | recv | 3
4269 * --+------+--------+------+------
4270 */
4271 int i = argc;
4272 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4273 INC_SP(1);
4274 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4275 argc = ++calling->argc;
4276
4277 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4278 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4279 TOPN(i) = symbol;
4280 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4281 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4282 VALUE exc = rb_make_no_method_exception(
4283 rb_eNoMethodError, 0, recv, argc, argv, priv);
4284
4285 rb_exc_raise(exc);
4286 }
4287 else {
4288 TOPN(i) = rb_str_intern(symbol);
4289 }
4290 }
4291 }
4292
4293 struct rb_forwarding_call_data new_fcd = {
4294 .cd = {
4295 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4296 .cc = NULL,
4297 },
4298 .caller_ci = NULL,
4299 };
4300
4301 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4302 calling->cd = &new_fcd.cd;
4303 }
4304 else {
4305 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4306 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4307 new_fcd.caller_ci = caller_ci;
4308 calling->cd = (struct rb_call_data *)&new_fcd;
4309 }
4310 calling->cc = &VM_CC_ON_STACK(klass,
4311 vm_call_general,
4312 { .method_missing_reason = missing_reason },
4313 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4314
4315 if (flags & VM_CALL_FCALL) {
4316 return vm_call_method(ec, reg_cfp, calling);
4317 }
4318
4319 const struct rb_callcache *cc = calling->cc;
4320 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4321
4322 if (vm_cc_cme(cc) != NULL) {
4323 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4324 case METHOD_VISI_PUBLIC: /* likely */
4325 return vm_call_method_each_type(ec, reg_cfp, calling);
4326 case METHOD_VISI_PRIVATE:
4327 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4328 break;
4329 case METHOD_VISI_PROTECTED:
4330 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4331 break;
4332 default:
4333 VM_UNREACHABLE(vm_call_method);
4334 }
4335 return vm_call_method_missing(ec, reg_cfp, calling);
4336 }
4337
4338 return vm_call_method_nome(ec, reg_cfp, calling);
4339}
4340
4341static VALUE
4342vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4343{
4344 const struct rb_callinfo *ci = calling->cd->ci;
4345 int i;
4346 VALUE sym;
4347
4348 i = calling->argc - 1;
4349
4350 if (calling->argc == 0) {
4351 rb_raise(rb_eArgError, "no method name given");
4352 }
4353
4354 sym = TOPN(i);
4355 /* E.g. when i == 2
4356 *
4357 * | | | | TOPN
4358 * +------+ | |
4359 * | arg1 | ---+ | | 0
4360 * +------+ | +------+
4361 * | arg0 | -+ +-> | arg1 | 1
4362 * +------+ | +------+
4363 * | sym | +---> | arg0 | 2
4364 * +------+ +------+
4365 * | recv | | recv | 3
4366 * --+------+--------+------+------
4367 */
4368 /* shift arguments */
4369 if (i > 0) {
4370 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4371 }
4372 calling->argc -= 1;
4373 DEC_SP(1);
4374
4375 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4376}
4377
4378static VALUE
4379vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4380{
4381 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4382 const struct rb_callinfo *ci = calling->cd->ci;
4383 int flags = VM_CALL_FCALL;
4384 VALUE sym;
4385
4386 VALUE argv_ary;
4387 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4388 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4389 sym = rb_ary_shift(argv_ary);
4390 flags |= VM_CALL_ARGS_SPLAT;
4391 if (calling->kw_splat) {
4392 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4393 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4394 calling->kw_splat = 0;
4395 }
4396 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4397 }
4398
4399 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4400 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4401}
4402
4403static VALUE
4404vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4405{
4406 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4407 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4408}
4409
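/* Optimized Kernel#send: strip the method-name argument off the stack and
 * re-dispatch with VM_CALL_FCALL set, so visibility is not enforced (send may
 * call private methods). E.g. obj.send(:m, 1) dispatches m(1) on obj. Certain
 * splat/kwarg shapes and forwarding take the _complex path; everything else
 * takes _simple. */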
4410static VALUE
4411vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4412{
4413 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4414
4415 const struct rb_callinfo *ci = calling->cd->ci;
4416 int flags = vm_ci_flag(ci);
4417
4418 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4419 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4420 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4421 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4422 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4423 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4424 }
4425
4426 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4427 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4428}
4429
4430static VALUE
4431vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4432 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4433{
4434 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4435
4436 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4437 unsigned int argc, flag;
4438
4439 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4440 argc = ++calling->argc;
4441
4442 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4443 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4444 vm_check_canary(ec, reg_cfp->sp);
4445 if (argc > 1) {
4446 MEMMOVE(argv+1, argv, VALUE, argc-1);
4447 }
4448 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4449 INC_SP(1);
4450
4451 ec->method_missing_reason = reason;
4452
4453 struct rb_forwarding_call_data new_fcd = {
4454 .cd = {
4455 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4456 .cc = NULL,
4457 },
4458 .caller_ci = NULL,
4459 };
4460
4461 if (!(flag & VM_CALL_FORWARDING)) {
4462 calling->cd = &new_fcd.cd;
4463 }
4464 else {
4465 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4466 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4467 new_fcd.caller_ci = caller_ci;
4468 calling->cd = (struct rb_call_data *)&new_fcd;
4469 }
4470
4471 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4472 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4473 return vm_call_method(ec, reg_cfp, calling);
4474}
4475
4476static VALUE
4477vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4478{
4479 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4480}
4481
4482static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4483static VALUE
4484vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4485{
4486 klass = RCLASS_SUPER(klass);
4487
4488 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4489 if (cme == NULL) {
4490 return vm_call_method_nome(ec, cfp, calling);
4491 }
4492 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4493 cme->def->body.refined.orig_me) {
4494 cme = refined_method_callable_without_refinement(cme);
4495 }
4496
4497 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4498
4499 return vm_call_method_each_type(ec, cfp, calling);
4500}
4501
4502static inline VALUE
4503find_refinement(VALUE refinements, VALUE klass)
4504{
4505 if (NIL_P(refinements)) {
4506 return Qnil;
4507 }
4508 return rb_hash_lookup(refinements, klass);
4509}
4510
4511PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4512static rb_control_frame_t *
4513current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4514{
4515 rb_control_frame_t *top_cfp = cfp;
4516
4517 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4518 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4519
4520 do {
4521 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4522 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4523 /* TODO: orphan block */
4524 return top_cfp;
4525 }
4526 } while (cfp->iseq != local_iseq);
4527 }
4528 return cfp;
4529}
4530
4531static const rb_callable_method_entry_t *
4532refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4533{
4534 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4535 const rb_callable_method_entry_t *cme;
4536
4537 if (orig_me->defined_class == 0) {
4538 cme = NULL;
4539 rb_notimplement();
4540 }
4541 else {
4542 cme = (const rb_callable_method_entry_t *)orig_me;
4543 }
4544
4545 VM_ASSERT(callable_method_entry_p(cme));
4546
4547 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4548 cme = NULL;
4549 }
4550
4551 return cme;
4552}
4553
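/* Search for a refined version of the callee: walk the caller's cref chain and,
 * for each refinement of the method's owner activated there via `using`, look up
 * the method id in the refinement module. When no refinement applies, fall back
 * to the refined method's original entry or to the superclass method. */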
4554static const rb_callable_method_entry_t *
4555search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4556{
4557 ID mid = vm_ci_mid(calling->cd->ci);
4558 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4559 const struct rb_callcache * const cc = calling->cc;
4560 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4561
4562 for (; cref; cref = CREF_NEXT(cref)) {
4563 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4564 if (NIL_P(refinement)) continue;
4565
4566 const rb_callable_method_entry_t *const ref_me =
4567 rb_callable_method_entry(refinement, mid);
4568
4569 if (ref_me) {
4570 if (vm_cc_call(cc) == vm_call_super_method) {
4571 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4572 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4573 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4574 continue;
4575 }
4576 }
4577
4578 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4579 cme->def != ref_me->def) {
4580 cme = ref_me;
4581 }
4582 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4583 return cme;
4584 }
4585 }
4586 else {
4587 return NULL;
4588 }
4589 }
4590
4591 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4592 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4593 }
4594 else {
4595 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4596 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4597 return cme;
4598 }
4599}
4600
4601static VALUE
4602vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4603{
4604 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4605
4606 if (ref_cme) {
4607 if (calling->cd->cc) {
4608 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4609 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4610 return vm_call_method(ec, cfp, calling);
4611 }
4612 else {
4613 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4614 calling->cc = ref_cc;
4615 return vm_call_method(ec, cfp, calling);
4616 }
4617 }
4618 else {
4619 return vm_call_method_nome(ec, cfp, calling);
4620 }
4621}
4622
4623static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4624
4625NOINLINE(static VALUE
4626 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4627 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4628
4629static VALUE
4630vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4631 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4632{
4633 int argc = calling->argc;
4634
4635 /* remove self */
4636 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4637 DEC_SP(1);
4638
4639 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4640}
4641
4642static VALUE
4643vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4644{
4645 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4646
4647 const struct rb_callinfo *ci = calling->cd->ci;
4648 VALUE procval = calling->recv;
4649 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4650}
4651
4652static VALUE
4653vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4654{
4655 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4656
4657 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4658 const struct rb_callinfo *ci = calling->cd->ci;
4659
4660 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4661 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4662 }
4663 else {
4664 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4665 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4666 return vm_call_general(ec, reg_cfp, calling);
4667 }
4668}
4669
4670static VALUE
4671vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4672{
4673 VALUE recv = calling->recv;
4674
4675 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4676 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4677 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4678
4679 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4680 return internal_RSTRUCT_GET(recv, off);
4681}
4682
4683static VALUE
4684vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4685{
4686 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4687
4688 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4689 reg_cfp->sp -= 1;
4690 return ret;
4691}
4692
4693static VALUE
4694vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4695{
4696 VALUE recv = calling->recv;
4697
4698 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4699 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4700 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4701
4702 rb_check_frozen(recv);
4703
4704 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4705 internal_RSTRUCT_SET(recv, off, val);
4706
4707 return val;
4708}
4709
4710static VALUE
4711vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4712{
4713 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4714
4715 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4716 reg_cfp->sp -= 2;
4717 return ret;
4718}
4719
4720NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4721 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4722
4723#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4724 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4725 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4726 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4727 var = func; \
4728 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4729 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4730 } \
4731 else { \
4732 nohook; \
4733 var = func; \
4734 }
4735
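/* VM_CALL_METHOD_ATTR(var, func, nohook) assigns the result of `func` to `var`.
 * When C-call tracing is active it brackets the call with RUBY_EVENT_C_CALL and
 * RUBY_EVENT_C_RETURN hooks; otherwise it runs `nohook` first, which is where the
 * callers below install their attr/struct fastpaths. */
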
4736static VALUE
4737vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4738 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4739{
4740 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4741 case OPTIMIZED_METHOD_TYPE_SEND:
4742 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4743 return vm_call_opt_send(ec, cfp, calling);
4744 case OPTIMIZED_METHOD_TYPE_CALL:
4745 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4746 return vm_call_opt_call(ec, cfp, calling);
4747 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4748 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4749 return vm_call_opt_block_call(ec, cfp, calling);
4750 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4751 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4752 rb_check_arity(calling->argc, 0, 0);
4753
4754 VALUE v;
4755 VM_CALL_METHOD_ATTR(v,
4756 vm_call_opt_struct_aref(ec, cfp, calling),
4757 set_vm_cc_ivar(cc); \
4758 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4759 return v;
4760 }
4761 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4762 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4763 rb_check_arity(calling->argc, 1, 1);
4764
4765 VALUE v;
4766 VM_CALL_METHOD_ATTR(v,
4767 vm_call_opt_struct_aset(ec, cfp, calling),
4768 set_vm_cc_ivar(cc); \
4769 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4770 return v;
4771 }
4772 default:
4773 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4774 }
4775}
4776
4777static VALUE
4778vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4779{
4780 const struct rb_callinfo *ci = calling->cd->ci;
4781 const struct rb_callcache *cc = calling->cc;
4782 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4783 VALUE v;
4784
4785 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4786
4787 switch (cme->def->type) {
4788 case VM_METHOD_TYPE_ISEQ:
4789 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4790 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4791 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4792 }
4793 else {
4794 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4795 return vm_call_iseq_setup(ec, cfp, calling);
4796 }
4797
4798 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4799 case VM_METHOD_TYPE_CFUNC:
4800 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4801 return vm_call_cfunc(ec, cfp, calling);
4802
4803 case VM_METHOD_TYPE_ATTRSET:
4804 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4805
4806 rb_check_arity(calling->argc, 1, 1);
4807
4808 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4809
4810 if (vm_cc_markable(cc)) {
4811 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4812 VM_CALL_METHOD_ATTR(v,
4813 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4814 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4815 }
4816 else {
4817 cc = &((struct rb_callcache) {
4818 .flags = T_IMEMO |
4819 (imemo_callcache << FL_USHIFT) |
4820 VM_CALLCACHE_UNMARKABLE |
4821 VM_CALLCACHE_ON_STACK,
4822 .klass = cc->klass,
4823 .cme_ = cc->cme_,
4824 .call_ = cc->call_,
4825 .aux_ = {
4826 .attr = {
4827 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4828 }
4829 },
4830 });
4831
4832 VM_CALL_METHOD_ATTR(v,
4833 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4834 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4835 }
4836 return v;
4837
4838 case VM_METHOD_TYPE_IVAR:
4839 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4840 rb_check_arity(calling->argc, 0, 0);
4841 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4842 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4843 VM_CALL_METHOD_ATTR(v,
4844 vm_call_ivar(ec, cfp, calling),
4845 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4846 return v;
4847
4848 case VM_METHOD_TYPE_MISSING:
4849 vm_cc_method_missing_reason_set(cc, 0);
4850 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4851 return vm_call_method_missing(ec, cfp, calling);
4852
4853 case VM_METHOD_TYPE_BMETHOD:
4854 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4855 return vm_call_bmethod(ec, cfp, calling);
4856
4857 case VM_METHOD_TYPE_ALIAS:
4858 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4859 return vm_call_alias(ec, cfp, calling);
4860
4861 case VM_METHOD_TYPE_OPTIMIZED:
4862 return vm_call_optimized(ec, cfp, calling, ci, cc);
4863
4864 case VM_METHOD_TYPE_UNDEF:
4865 break;
4866
4867 case VM_METHOD_TYPE_ZSUPER:
4868 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4869
4870 case VM_METHOD_TYPE_REFINED:
4871 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4872 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4873 return vm_call_refined(ec, cfp, calling);
4874 }
4875
4876 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4877}
4878
4879NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4880
4881static VALUE
4882vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4883{
4884 /* method missing */
4885 const struct rb_callinfo *ci = calling->cd->ci;
4886 const int stat = ci_missing_reason(ci);
4887
4888 if (vm_ci_mid(ci) == idMethodMissing) {
4889 if (UNLIKELY(calling->heap_argv)) {
4890 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4891 }
4892 else {
4893 rb_control_frame_t *reg_cfp = cfp;
4894 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4895 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4896 }
4897 }
4898 else {
4899 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4900 }
4901}
4902
4903/* Protected method calls and super invocations need to check that the receiver
4904 * (self for super) inherits the module on which the method is defined.
4905 * In the case of refinements, it should consider the original class, not the
4906 * refinement.
4907 */
4908static VALUE
4909vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4910{
4911 VALUE defined_class = me->defined_class;
4912 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4913 return NIL_P(refined_class) ? defined_class : refined_class;
4914}
4915
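/* Central visibility dispatch: public calls go straight through; private calls
 * are redirected to method_missing unless receiver-less (VM_CALL_FCALL);
 * protected calls require self to be a kind of the defining class, and run
 * through a stack-local copy of the cc so the result is not cached for other
 * call sites. */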
4916static inline VALUE
4917vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4918{
4919 const struct rb_callinfo *ci = calling->cd->ci;
4920 const struct rb_callcache *cc = calling->cc;
4921
4922 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4923
4924 if (vm_cc_cme(cc) != NULL) {
4925 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4926 case METHOD_VISI_PUBLIC: /* likely */
4927 return vm_call_method_each_type(ec, cfp, calling);
4928
4929 case METHOD_VISI_PRIVATE:
4930 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4931 enum method_missing_reason stat = MISSING_PRIVATE;
4932 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4933
4934 vm_cc_method_missing_reason_set(cc, stat);
4935 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4936 return vm_call_method_missing(ec, cfp, calling);
4937 }
4938 return vm_call_method_each_type(ec, cfp, calling);
4939
4940 case METHOD_VISI_PROTECTED:
4941 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4942 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4943 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4944 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4945 return vm_call_method_missing(ec, cfp, calling);
4946 }
4947 else {
4948 /* caching method info to dummy cc */
4949 VM_ASSERT(vm_cc_cme(cc) != NULL);
4950 struct rb_callcache cc_on_stack = *cc;
4951 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4952 calling->cc = &cc_on_stack;
4953 return vm_call_method_each_type(ec, cfp, calling);
4954 }
4955 }
4956 return vm_call_method_each_type(ec, cfp, calling);
4957
4958 default:
4959 rb_bug("unreachable");
4960 }
4961 }
4962 else {
4963 return vm_call_method_nome(ec, cfp, calling);
4964 }
4965}
4966
4967static VALUE
4968vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4969{
4970 RB_DEBUG_COUNTER_INC(ccf_general);
4971 return vm_call_method(ec, reg_cfp, calling);
4972}
4973
4974void
4975rb_vm_cc_general(const struct rb_callcache *cc)
4976{
4977 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4978 VM_ASSERT(cc != vm_cc_empty());
4979
4980 *(vm_call_handler *)&cc->call_ = vm_call_general;
4981}
4982
4983static VALUE
4984vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4985{
4986 RB_DEBUG_COUNTER_INC(ccf_super_method);
4987
4988 // This check is introduced to keep this function distinct from `vm_call_general`, because some
4989 // compilers (MSVC, as we found) can merge identical functions so that their addresses become the same.
4990 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
4991 if (ec == NULL) rb_bug("unreachable");
4992
4993 /* this check is required to distinguish this function from other ones. */
4994 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4995 return vm_call_method(ec, reg_cfp, calling);
4996}
4997
4998/* super */
4999
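/* Where `super` resumes the lookup: for a refinement's iclass, step back to the
 * refinement module itself, then skip prepended modules via RCLASS_ORIGIN before
 * taking the superclass. */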
5000static inline VALUE
5001vm_search_normal_superclass(VALUE klass)
5002{
5003 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5004 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5005 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5006 klass = RBASIC(klass)->klass;
5007 }
5008 klass = RCLASS_ORIGIN(klass);
5009 return RCLASS_SUPER(klass);
5010}
5011
5012NORETURN(static void vm_super_outside(void));
5013
5014static void
5015vm_super_outside(void)
5016{
5017 rb_raise(rb_eNoMethodError, "super called outside of method");
5018}
5019
5020static const struct rb_callcache *
5021empty_cc_for_super(void)
5022{
5023 return &vm_empty_cc_for_super;
5024}
5025
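/* Validate and resolve a `super` call site: reject super outside of a method, a
 * receiver that is no longer a kind of the current defined class, and implicit
 * (zsuper) argument passing out of define_method. The call info is rewritten to
 * the original method id, lookup restarts at the normal superclass, and the
 * fastpath is invalidated for refined and attr methods, which require
 * cc->call == vm_call_super_method. */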
5026static const struct rb_callcache *
5027vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5028{
5029 VALUE current_defined_class;
5030 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5031
5032 if (!me) {
5033 vm_super_outside();
5034 }
5035
5036 current_defined_class = vm_defined_class_for_protected_call(me);
5037
5038 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5039 reg_cfp->iseq != method_entry_iseqptr(me) &&
5040 !rb_obj_is_kind_of(recv, current_defined_class)) {
5041 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5042 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5043
5044 if (m) { /* not bound UnboundMethod */
5045 rb_raise(rb_eTypeError,
5046 "self has wrong type to call super in this context: "
5047 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5048 rb_obj_class(recv), m);
5049 }
5050 }
5051
5052 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5053 rb_raise(rb_eRuntimeError,
5054 "implicit argument passing of super from method defined"
5055 " by define_method() is not supported."
5056 " Specify all arguments explicitly.");
5057 }
5058
5059 ID mid = me->def->original_id;
5060
5061 if (!vm_ci_markable(cd->ci)) {
5062 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5063 }
5064 else {
5065 // update iseq. really? (TODO)
5066 cd->ci = vm_ci_new_runtime(mid,
5067 vm_ci_flag(cd->ci),
5068 vm_ci_argc(cd->ci),
5069 vm_ci_kwarg(cd->ci));
5070
5071 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5072 }
5073
5074 const struct rb_callcache *cc;
5075
5076 VALUE klass = vm_search_normal_superclass(me->defined_class);
5077
5078 if (!klass) {
5079 /* bound instance method of module */
5080 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5081 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5082 }
5083 else {
5084 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5085 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5086
5087 // define_method can cache for a different method id
5088 if (cached_cme == NULL) {
5089 // empty_cc_for_super is not markable object
5090 cd->cc = empty_cc_for_super();
5091 }
5092 else if (cached_cme->called_id != mid) {
5093 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5094 if (cme) {
5095 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5096 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5097 }
5098 else {
5099 cd->cc = cc = empty_cc_for_super();
5100 }
5101 }
5102 else {
5103 switch (cached_cme->def->type) {
5104 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5105 case VM_METHOD_TYPE_REFINED:
5106 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5107 case VM_METHOD_TYPE_ATTRSET:
5108 case VM_METHOD_TYPE_IVAR:
5109 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5110 break;
5111 default:
5112 break; // use fastpath
5113 }
5114 }
5115 }
5116
5117 VM_ASSERT((vm_cc_cme(cc), true));
5118
5119 return cc;
5120}
5121
5122/* yield */
5123
5124static inline int
5125block_proc_is_lambda(const VALUE procval)
5126{
5127 rb_proc_t *proc;
5128
5129 if (procval) {
5130 GetProcPtr(procval, proc);
5131 return proc->is_lambda;
5132 }
5133 else {
5134 return 0;
5135 }
5136}
5137
5138static inline const rb_namespace_t *
5139block_proc_namespace(const VALUE procval)
5140{
5141 rb_proc_t *proc;
5142
5143 if (procval) {
5144 GetProcPtr(procval, proc);
5145 return proc->ns;
5146 }
5147 else {
5148 return NULL;
5149 }
5150}
5151
5152static VALUE
5153vm_yield_with_cfunc(rb_execution_context_t *ec,
5154 const struct rb_captured_block *captured,
5155 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5156 const rb_callable_method_entry_t *me)
5157{
5158 int is_lambda = FALSE; /* TODO */
5159 VALUE val, arg, blockarg;
5160 int frame_flag;
5161 const struct vm_ifunc *ifunc = captured->code.ifunc;
5162
5163 if (is_lambda) {
5164 arg = rb_ary_new4(argc, argv);
5165 }
5166 else if (argc == 0) {
5167 arg = Qnil;
5168 }
5169 else {
5170 arg = argv[0];
5171 }
5172
5173 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5174
5175 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5176 if (kw_splat) {
5177 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5178 }
5179
5180 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5181 frame_flag,
5182 self,
5183 VM_GUARDED_PREV_EP(captured->ep),
5184 (VALUE)me,
5185 0, ec->cfp->sp, 0, 0);
5186 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5187 rb_vm_pop_frame(ec);
5188
5189 return val;
5190}
5191
5192VALUE
5193rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5194{
5195 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5196}
5197
5198static VALUE
5199vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5200{
5201 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5202}
5203
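/* Implicit arg0 splat for non-lambda blocks: a single Array yielded to a block
 * with multiple leading parameters is spread across them, e.g.
 *
 *   [[1, 2]].each { |a, b| }   # a == 1, b == 2
 */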
5204static inline int
5205vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5206{
5207 int i;
5208 long len = RARRAY_LEN(ary);
5209
5210 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5211
5212 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5213 argv[i] = RARRAY_AREF(ary, i);
5214 }
5215
5216 return i;
5217}
5218
5219static inline VALUE
5220vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5221{
5222 VALUE ary, arg0 = argv[0];
5223 ary = rb_check_array_type(arg0);
5224#if 0
5225 argv[0] = arg0;
5226#else
5227 VM_ASSERT(argv[0] == arg0);
5228#endif
5229 return ary;
5230}
5231
5232static int
5233vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5234{
5235 if (rb_simple_iseq_p(iseq)) {
5236 rb_control_frame_t *cfp = ec->cfp;
5237 VALUE arg0;
5238
5239 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5240
5241 if (arg_setup_type == arg_setup_block &&
5242 calling->argc == 1 &&
5243 ISEQ_BODY(iseq)->param.flags.has_lead &&
5244 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5245 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5246 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5247 }
5248
5249 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5250 if (arg_setup_type == arg_setup_block) {
5251 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5252 int i;
5253 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5254 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5255 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5256 }
5257 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5258 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5259 }
5260 }
5261 else {
5262 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5263 }
5264 }
5265
5266 return 0;
5267 }
5268 else {
5269 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5270 }
5271}
5272
5273static int
5274vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5275{
5276 struct rb_calling_info calling_entry, *calling;
5277
5278 calling = &calling_entry;
5279 calling->argc = argc;
5280 calling->block_handler = block_handler;
5281 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5282 calling->recv = Qundef;
5283 calling->heap_argv = 0;
5284 calling->cc = NULL;
5285 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5286
5287 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5288}
5289
5290/* ruby iseq -> ruby block */
5291
5292static VALUE
5293vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5294 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5295 bool is_lambda, VALUE block_handler)
5296{
5297 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5298 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5299 const int arg_size = ISEQ_BODY(iseq)->param.size;
5300 VALUE * const rsp = GET_SP() - calling->argc;
5301 VALUE * const argv = rsp;
5302 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5303 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5304
5305 SET_SP(rsp);
5306
5307 if (calling->proc_ns) {
5308 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5309 }
5310
5311 vm_push_frame(ec, iseq,
5312 frame_flag,
5313 captured->self,
5314 VM_GUARDED_PREV_EP(captured->ep), 0,
5315 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5316 rsp + arg_size,
5317 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5318
5319 return Qundef;
5320}
5321
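/* Symbol block handler, e.g. ary.map(&:to_s): the first yielded value becomes the
 * receiver and the remaining values become arguments of the method named by the
 * symbol; yielding no value raises "no receiver given". */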
5322static VALUE
5323vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5324 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5325 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5326{
5327 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5328 int flags = vm_ci_flag(ci);
5329
5330 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5331 ((calling->argc == 0) ||
5332 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5333 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5334 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5335 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5336 flags = 0;
5337 if (UNLIKELY(calling->heap_argv)) {
5338#if VM_ARGC_STACK_MAX < 0
5339 if (RARRAY_LEN(calling->heap_argv) < 1) {
5340 rb_raise(rb_eArgError, "no receiver given");
5341 }
5342#endif
5343 calling->recv = rb_ary_shift(calling->heap_argv);
5344 // Modify stack to avoid cfp consistency error
5345 reg_cfp->sp++;
5346 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5347 reg_cfp->sp[-2] = calling->recv;
5348 flags |= VM_CALL_ARGS_SPLAT;
5349 }
5350 else {
5351 if (calling->argc < 1) {
5352 rb_raise(rb_eArgError, "no receiver given");
5353 }
5354 calling->recv = TOPN(--calling->argc);
5355 }
5356 if (calling->kw_splat) {
5357 flags |= VM_CALL_KW_SPLAT;
5358 }
5359 }
5360 else {
5361 if (calling->argc < 1) {
5362 rb_raise(rb_eArgError, "no receiver given");
5363 }
5364 calling->recv = TOPN(--calling->argc);
5365 }
5366
5367 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5368}
5369
5370static VALUE
5371vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5372 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5373 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5374{
5375 VALUE val;
5376 int argc;
5377 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5378 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5379 argc = calling->argc;
5380 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5381 POPN(argc); /* TODO: should put before C/yield? */
5382 return val;
5383}
5384
5385static VALUE
5386vm_proc_to_block_handler(VALUE procval)
5387{
5388 const struct rb_block *block = vm_proc_block(procval);
5389
5390 switch (vm_block_type(block)) {
5391 case block_type_iseq:
5392 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5393 case block_type_ifunc:
5394 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5395 case block_type_symbol:
5396 return VM_BH_FROM_SYMBOL(block->as.symbol);
5397 case block_type_proc:
5398 return VM_BH_FROM_PROC(block->as.proc);
5399 }
5400 VM_UNREACHABLE(vm_yield_with_proc);
5401 return Qundef;
5402}
5403
5404static VALUE
5405vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5406 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5407 bool is_lambda, VALUE block_handler)
5408{
5409 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5410 VALUE proc = VM_BH_TO_PROC(block_handler);
5411 if (!calling->proc_ns) {
5412 calling->proc_ns = block_proc_namespace(proc);
5413 }
5414 is_lambda = block_proc_is_lambda(proc);
5415 block_handler = vm_proc_to_block_handler(proc);
5416 }
5417
5418 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5419}
5420
5421static inline VALUE
5422vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5423 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5424 bool is_lambda, VALUE block_handler)
5425{
5426 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5427 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5428 bool is_lambda, VALUE block_handler);
5429
5430 switch (vm_block_handler_type(block_handler)) {
5431 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5432 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5433 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5434 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5435 default: rb_bug("vm_invoke_block: unreachable");
5436 }
5437
5438 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5439}
5440
5441static VALUE
5442vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5443{
5444 const rb_execution_context_t *ec = GET_EC();
5445 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5446 struct rb_captured_block *captured;
5447
5448 if (cfp == 0) {
5449 rb_bug("vm_make_proc_with_iseq: unreachable");
5450 }
5451
5452 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5453 captured->code.iseq = blockiseq;
5454
5455 return rb_vm_make_proc(ec, captured, rb_cProc);
5456}
5457
5458static VALUE
5459vm_once_exec(VALUE iseq)
5460{
5461 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5462 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5463}
5464
5465static VALUE
5466vm_once_clear(VALUE data)
5467{
5468 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5469 is->once.running_thread = NULL;
5470 return Qnil;
5471}
5472
5473/* defined insn */
5474
5475static bool
5476check_respond_to_missing(VALUE obj, VALUE v)
5477{
5478 VALUE args[2];
5479 VALUE r;
5480
5481 args[0] = obj; args[1] = Qfalse;
5482 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5483 if (!UNDEF_P(r) && RTEST(r)) {
5484 return true;
5485 }
5486 else {
5487 return false;
5488 }
5489}
5490
5491static bool
5492vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5493{
5494 VALUE klass;
5495 enum defined_type type = (enum defined_type)op_type;
5496
5497 switch (type) {
5498 case DEFINED_IVAR:
5499 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5500 break;
5501 case DEFINED_GVAR:
5502 return rb_gvar_defined(SYM2ID(obj));
5503 break;
5504 case DEFINED_CVAR: {
5505 const rb_cref_t *cref = vm_get_cref(GET_EP());
5506 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5507 return rb_cvar_defined(klass, SYM2ID(obj));
5508 break;
5509 }
5510 case DEFINED_CONST:
5511 case DEFINED_CONST_FROM: {
5512 bool allow_nil = type == DEFINED_CONST;
5513 klass = v;
5514 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5515 break;
5516 }
5517 case DEFINED_FUNC:
5518 klass = CLASS_OF(v);
5519 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5520 break;
5521 case DEFINED_METHOD:{
5522 VALUE klass = CLASS_OF(v);
5523 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5524
5525 if (me) {
5526 switch (METHOD_ENTRY_VISI(me)) {
5527 case METHOD_VISI_PRIVATE:
5528 break;
5529 case METHOD_VISI_PROTECTED:
5530 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5531 break;
5532 }
5533 case METHOD_VISI_PUBLIC:
5534 return true;
5535 break;
5536 default:
5537 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5538 }
5539 }
5540 else {
5541 return check_respond_to_missing(obj, v);
5542 }
5543 break;
5544 }
5545 case DEFINED_YIELD:
5546 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5547 return true;
5548 }
5549 break;
5550 case DEFINED_ZSUPER:
5551 {
5552 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5553
5554 if (me) {
5555 VALUE klass = vm_search_normal_superclass(me->defined_class);
5556 if (!klass) return false;
5557
5558 ID id = me->def->original_id;
5559
5560 return rb_method_boundp(klass, id, 0);
5561 }
5562 }
5563 break;
5564 case DEFINED_REF:
5565 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5566 default:
5567 rb_bug("unimplemented defined? type (VM)");
5568 break;
5569 }
5570
5571 return false;
5572}
5573
5574bool
5575rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5576{
5577 return vm_defined(ec, reg_cfp, op_type, obj, v);
5578}
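/* Illustrative mapping from `defined?` expressions to the op_type values
 * handled above (a sketch; the authoritative mapping is emitted by the
 * compiler in compile.c):
 *
 *   defined?(@a)        # DEFINED_IVAR
 *   defined?($g)        # DEFINED_GVAR
 *   defined?(@@c)       # DEFINED_CVAR
 *   defined?(Foo)       # DEFINED_CONST
 *   defined?(Foo::Bar)  # DEFINED_CONST_FROM
 *   defined?(foo)       # DEFINED_FUNC   (call on self)
 *   defined?(x.foo)     # DEFINED_METHOD (visibility-checked above)
 *   defined?(yield)     # DEFINED_YIELD
 *   defined?(super)     # DEFINED_ZSUPER
 *   defined?($1)        # DEFINED_REF
 */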
5579
5580static const VALUE *
5581vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5582{
5583 rb_num_t i;
5584 const VALUE *ep = reg_ep;
5585 for (i = 0; i < lv; i++) {
5586 ep = GET_PREV_EP(ep);
5587 }
5588 return ep;
5589}
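/* Sketch: `lv` is the lexical nesting distance. For the code below,
 * reading `a` in the innermost block walks two prev-EP links (assuming
 * no optimization changes the nesting):
 *
 *   a = 1
 *   1.times do        # one level away from `a`
 *     1.times do      # two levels away from `a`
 *       p a           # getlocal a, 2  ->  vm_get_ep(ep, 2)
 *     end
 *   end
 */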
5590
5591static VALUE
5592vm_get_special_object(const VALUE *const reg_ep,
5593 enum vm_special_object_type type)
5594{
5595 switch (type) {
5596 case VM_SPECIAL_OBJECT_VMCORE:
5597 return rb_mRubyVMFrozenCore;
5598 case VM_SPECIAL_OBJECT_CBASE:
5599 return vm_get_cbase(reg_ep);
5600 case VM_SPECIAL_OBJECT_CONST_BASE:
5601 return vm_get_const_base(reg_ep);
5602 default:
5603 rb_bug("putspecialobject insn: unknown value_type %d", type);
5604 }
5605}
5606
5607// The ZJIT implementation uses this C function
5608// and needs a non-static function to call
5609VALUE
5610rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5611{
5612 return vm_get_special_object(reg_ep, type);
5613}
5614
5615static VALUE
5616vm_concat_array(VALUE ary1, VALUE ary2st)
5617{
5618 const VALUE ary2 = ary2st;
5619 VALUE tmp1 = rb_check_to_array(ary1);
5620 VALUE tmp2 = rb_check_to_array(ary2);
5621
5622 if (NIL_P(tmp1)) {
5623 tmp1 = rb_ary_new3(1, ary1);
5624 }
5625 if (tmp1 == ary1) {
5626 tmp1 = rb_ary_dup(ary1);
5627 }
5628
5629 if (NIL_P(tmp2)) {
5630 return rb_ary_push(tmp1, ary2);
5631 }
5632 else {
5633 return rb_ary_concat(tmp1, tmp2);
5634 }
5635}
5636
5637static VALUE
5638vm_concat_to_array(VALUE ary1, VALUE ary2st)
5639{
5640 /* ary1 must be a newly created array */
5641 const VALUE ary2 = ary2st;
5642
5643 if (NIL_P(ary2)) return ary1;
5644
5645 VALUE tmp2 = rb_check_to_array(ary2);
5646
5647 if (NIL_P(tmp2)) {
5648 return rb_ary_push(ary1, ary2);
5649 }
5650 else {
5651 return rb_ary_concat(ary1, tmp2);
5652 }
5653}
5654
5655// The YJIT implementation uses this C function
5656// and needs a non-static function to call
5657VALUE
5658rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5659{
5660 return vm_concat_array(ary1, ary2st);
5661}
5662
5663VALUE
5664rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5665{
5666 return vm_concat_to_array(ary1, ary2st);
5667}
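/* Behavioral sketch of the two concat helpers at the Ruby level (assuming
 * the usual conversion protocol via rb_check_to_array):
 *
 *   [*a, *b]      # concatarray: either operand may need to_a conversion;
 *                 # `a` is dup'ed when the conversion returns it unchanged,
 *                 # so the caller always gets a fresh array
 *   [1, 2, *b]    # concattoarray: the left operand was just built by a
 *                 # newarray insn, so it is safely mutated in place
 */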
5668
5669static VALUE
5670vm_splat_array(VALUE flag, VALUE ary)
5671{
5672 if (NIL_P(ary)) {
5673 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5674 }
5675 VALUE tmp = rb_check_to_array(ary);
5676 if (NIL_P(tmp)) {
5677 return rb_ary_new3(1, ary);
5678 }
5679 else if (RTEST(flag)) {
5680 return rb_ary_dup(tmp);
5681 }
5682 else {
5683 return tmp;
5684 }
5685}
5686
5687// The YJIT implementation uses this C function
5688// and needs a non-static function to call
5689VALUE
5690rb_vm_splat_array(VALUE flag, VALUE ary)
5691{
5692 return vm_splat_array(flag, ary);
5693}
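/* Sketch of the splatarray flag: the compiler sets `flag` to true when
 * the resulting array may be mutated afterwards (e.g. `a = *b`), forcing
 * a dup, and to false when the result is only read, in which case sharing
 * the to_a result (or the frozen empty array for nil) is safe.
 */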
5694
5695static VALUE
5696vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5697{
5698 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5699
5700 if (flag & VM_CHECKMATCH_ARRAY) {
5701 long i;
5702 const long n = RARRAY_LEN(pattern);
5703
5704 for (i = 0; i < n; i++) {
5705 VALUE v = RARRAY_AREF(pattern, i);
5706 VALUE c = check_match(ec, v, target, type);
5707
5708 if (RTEST(c)) {
5709 return c;
5710 }
5711 }
5712 return Qfalse;
5713 }
5714 else {
5715 return check_match(ec, pattern, target, type);
5716 }
5717}
5718
5719VALUE
5720rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5721{
5722 return vm_check_match(ec, target, pattern, flag);
5723}
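/* VM_CHECKMATCH_ARRAY corresponds to splatted when-clauses, a sketch:
 *
 *   case x
 *   when *patterns then ...  # each element is tried as `pattern === x`
 *   end                      # until one returns a truthy value
 *
 * The non-array branch is a single `when pat` (or a rescue clause match,
 * which uses a different vm_check_match_type).
 */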
5724
5725static VALUE
5726vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5727{
5728 const VALUE kw_bits = *(ep - bits);
5729
5730 if (FIXNUM_P(kw_bits)) {
5731 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5732 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5733 return Qfalse;
5734 }
5735 else {
5736 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5737 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5738 }
5739 return Qtrue;
5740}
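/* Illustrative sketch: for `def m(k1: default1, k2: default2)` the
 * compiler guards each default expression with a checkkeyword insn.
 * kw_bits is a Fixnum bitmask (bit i set => keyword i was passed by the
 * caller) or, beyond KW_SPECIFIED_BITS_MAX keywords, a Hash keyed by
 * index. Qtrue here means "the keyword was not supplied, evaluate its
 * default".
 */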
5741
5742static void
5743vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5744{
5745 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5746 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5747 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5748 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5749
5750 switch (flag) {
5751 case RUBY_EVENT_CALL:
5752 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5753 return;
5754 case RUBY_EVENT_C_CALL:
5755 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5756 return;
5757 case RUBY_EVENT_RETURN:
5758 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5759 return;
5760 case RUBY_EVENT_C_RETURN:
5761 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5762 return;
5763 }
5764 }
5765}
5766
5767static VALUE
5768vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5769{
5770 if (!rb_const_defined_at(cbase, id)) {
5771 return 0;
5772 }
5773 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5774 return rb_public_const_get_at(cbase, id);
5775 }
5776 else {
5777 return rb_const_get_at(cbase, id);
5778 }
5779}
5780
5781static VALUE
5782vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5783{
5784 if (!RB_TYPE_P(klass, T_CLASS)) {
5785 return 0;
5786 }
5787 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5788 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5789
5790 if (tmp != super) {
5791 rb_raise(rb_eTypeError,
5792 "superclass mismatch for class %"PRIsVALUE"",
5793 rb_id2str(id));
5794 }
5795 else {
5796 return klass;
5797 }
5798 }
5799 else {
5800 return klass;
5801 }
5802}
5803
5804static VALUE
5805vm_check_if_module(ID id, VALUE mod)
5806{
5807 if (!RB_TYPE_P(mod, T_MODULE)) {
5808 return 0;
5809 }
5810 else {
5811 return mod;
5812 }
5813}
5814
5815static VALUE
5816declare_under(ID id, VALUE cbase, VALUE c)
5817{
5818 rb_set_class_path_string(c, cbase, rb_id2str(id));
5819 rb_const_set(cbase, id, c);
5820 return c;
5821}
5822
5823static VALUE
5824vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5825{
5826 /* new class declaration */
5827 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5828 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5829 rb_define_alloc_func(c, rb_get_alloc_func(c));
5830 rb_class_inherited(s, c);
5831 return c;
5832}
5833
5834static VALUE
5835vm_declare_module(ID id, VALUE cbase)
5836{
5837 /* new module declaration */
5838 return declare_under(id, cbase, rb_module_new());
5839}
5840
5841NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5842static void
5843unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5844{
5845 VALUE name = rb_id2str(id);
5846 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5847 name, type);
5848 VALUE location = rb_const_source_location_at(cbase, id);
5849 if (!NIL_P(location)) {
5850 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5851 " previous definition of %"PRIsVALUE" was here",
5852 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5853 }
5854 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5855}
5856
5857static VALUE
5858vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5859{
5860 VALUE klass;
5861
5862 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5863 rb_raise(rb_eTypeError,
5864 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5865 rb_obj_class(super));
5866 }
5867
5868 vm_check_if_namespace(cbase);
5869
5870 /* find klass */
5871 rb_autoload_load(cbase, id);
5872
5873 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5874 if (!vm_check_if_class(id, flags, super, klass))
5875 unmatched_redefinition("class", cbase, id, klass);
5876 return klass;
5877 }
5878 else {
5879 return vm_declare_class(id, flags, cbase, super);
5880 }
5881}
5882
5883static VALUE
5884vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5885{
5886 VALUE mod;
5887
5888 vm_check_if_namespace(cbase);
5889 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5890 if (!vm_check_if_module(id, mod))
5891 unmatched_redefinition("module", cbase, id, mod);
5892 return mod;
5893 }
5894 else {
5895 return vm_declare_module(id, cbase);
5896 }
5897}
5898
5899static VALUE
5900vm_find_or_create_class_by_id(ID id,
5901 rb_num_t flags,
5902 VALUE cbase,
5903 VALUE super)
5904{
5905 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5906
5907 switch (type) {
5908 case VM_DEFINECLASS_TYPE_CLASS:
5909 /* classdef returns class scope value */
5910 return vm_define_class(id, flags, cbase, super);
5911
5912 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5913 /* classdef returns class scope value */
5914 return rb_singleton_class(cbase);
5915
5916 case VM_DEFINECLASS_TYPE_MODULE:
5917 /* classdef returns class scope value */
5918 return vm_define_module(id, flags, cbase);
5919
5920 default:
5921 rb_bug("unknown defineclass type: %d", (int)type);
5922 }
5923}
5924
5925static rb_method_visibility_t
5926vm_scope_visibility_get(const rb_execution_context_t *ec)
5927{
5928 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5929
5930 if (!vm_env_cref_by_cref(cfp->ep)) {
5931 return METHOD_VISI_PUBLIC;
5932 }
5933 else {
5934 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5935 }
5936}
5937
5938static int
5939vm_scope_module_func_check(const rb_execution_context_t *ec)
5940{
5941 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5942
5943 if (!vm_env_cref_by_cref(cfp->ep)) {
5944 return FALSE;
5945 }
5946 else {
5947 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5948 }
5949}
5950
5951static void
5952vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5953{
5954 VALUE klass;
5955 rb_method_visibility_t visi;
5956 rb_cref_t *cref = vm_ec_cref(ec);
5957
5958 if (is_singleton) {
5959 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5960 visi = METHOD_VISI_PUBLIC;
5961 }
5962 else {
5963 klass = CREF_CLASS_FOR_DEFINITION(cref);
5964 visi = vm_scope_visibility_get(ec);
5965 }
5966
5967 if (NIL_P(klass)) {
5968 rb_raise(rb_eTypeError, "no class/module to add method");
5969 }
5970
5971 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5972 // Set max_iv_count on classes based on the number of ivar sets in the initialize method
5973 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5974 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
5975 }
5976
5977 if (!is_singleton && vm_scope_module_func_check(ec)) {
5978 klass = rb_singleton_class(klass);
5979 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5980 }
5981}
5982
5983static VALUE
5984vm_invokeblock_i(struct rb_execution_context_struct *ec,
5985 struct rb_control_frame_struct *reg_cfp,
5986 struct rb_calling_info *calling)
5987{
5988 const struct rb_callinfo *ci = calling->cd->ci;
5989 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5990
5991 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5992 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5993 }
5994 else {
5995 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5996 }
5997}
5998
5999enum method_explorer_type {
6000 mexp_search_method,
6001 mexp_search_invokeblock,
6002 mexp_search_super,
6003};
6004
6005static inline VALUE
6006vm_sendish(
6007 struct rb_execution_context_struct *ec,
6008 struct rb_control_frame_struct *reg_cfp,
6009 struct rb_call_data *cd,
6010 VALUE block_handler,
6011 enum method_explorer_type method_explorer
6012) {
6013 VALUE val = Qundef;
6014 const struct rb_callinfo *ci = cd->ci;
6015 const struct rb_callcache *cc;
6016 int argc = vm_ci_argc(ci);
6017 VALUE recv = TOPN(argc);
6018 struct rb_calling_info calling = {
6019 .block_handler = block_handler,
6020 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6021 .recv = recv,
6022 .argc = argc,
6023 .cd = cd,
6024 };
6025
6026 switch (method_explorer) {
6027 case mexp_search_method:
6028 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6029 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6030 break;
6031 case mexp_search_super:
6032 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6033 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6034 break;
6035 case mexp_search_invokeblock:
6036 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6037 break;
6038 }
6039 return val;
6040}
6041
6042VALUE
6043rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6044{
6045 stack_check(ec);
6046
6047 struct rb_forwarding_call_data adjusted_cd;
6048 struct rb_callinfo adjusted_ci;
6049
6050 VALUE bh;
6051 VALUE val;
6052
6053 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6054 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6055
6056 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6057
6058 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6059 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6060 }
6061 }
6062 else {
6063 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6064 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6065 }
6066
6067 VM_EXEC(ec, val);
6068 return val;
6069}
6070
6071VALUE
6072rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6073{
6074 stack_check(ec);
6075 VALUE bh = VM_BLOCK_HANDLER_NONE;
6076 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6077 VM_EXEC(ec, val);
6078 return val;
6079}
6080
6081VALUE
6082rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6083{
6084 stack_check(ec);
6085 struct rb_forwarding_call_data adjusted_cd;
6086 struct rb_callinfo adjusted_ci;
6087
6088 VALUE bh;
6089 VALUE val;
6090
6091 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6092 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6093
6094 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6095
6096 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6097 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6098 }
6099 }
6100 else {
6101 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6102 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6103 }
6104
6105 VM_EXEC(ec, val);
6106 return val;
6107}
6108
6109VALUE
6110rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6111{
6112 stack_check(ec);
6113 VALUE bh = VM_BLOCK_HANDLER_NONE;
6114 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6115 VM_EXEC(ec, val);
6116 return val;
6117}
6118
6119/* object.c */
6120VALUE rb_nil_to_s(VALUE);
6121VALUE rb_true_to_s(VALUE);
6122VALUE rb_false_to_s(VALUE);
6123/* numeric.c */
6124VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6125VALUE rb_fix_to_s(VALUE);
6126/* variable.c */
6127VALUE rb_mod_to_s(VALUE);
6128VALUE rb_mod_name(VALUE);
6129
6130static VALUE
6131vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6132{
6133 int type = TYPE(recv);
6134 if (type == T_STRING) {
6135 return recv;
6136 }
6137
6138 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
6139
6140 switch (type) {
6141 case T_SYMBOL:
6142 if (check_method_basic_definition(cme)) {
6143 // rb_sym_to_s() allocates a mutable string, but since we are only
6144 // going to use this string for interpolation, it's fine to use the
6145 // frozen string.
6146 return rb_sym2str(recv);
6147 }
6148 break;
6149 case T_MODULE:
6150 case T_CLASS:
6151 if (check_cfunc(cme, rb_mod_to_s)) {
6152 // rb_mod_to_s() allocates a mutable string, but since we are only
6153 // going to use this string for interpolation, it's fine to use the
6154 // frozen string.
6155 VALUE val = rb_mod_name(recv);
6156 if (NIL_P(val)) {
6157 val = rb_mod_to_s(recv);
6158 }
6159 return val;
6160 }
6161 break;
6162 case T_NIL:
6163 if (check_cfunc(cme, rb_nil_to_s)) {
6164 return rb_nil_to_s(recv);
6165 }
6166 break;
6167 case T_TRUE:
6168 if (check_cfunc(cme, rb_true_to_s)) {
6169 return rb_true_to_s(recv);
6170 }
6171 break;
6172 case T_FALSE:
6173 if (check_cfunc(cme, rb_false_to_s)) {
6174 return rb_false_to_s(recv);
6175 }
6176 break;
6177 case T_FIXNUM:
6178 if (check_cfunc(cme, rb_int_to_s)) {
6179 return rb_fix_to_s(recv);
6180 }
6181 break;
6182 }
6183 return Qundef;
6184}
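/* This helper backs the objtostring insn used by string interpolation.
 * A sketch of the fast paths above (each applies only while the
 * corresponding to_s is not redefined):
 *
 *   "#{"s"}"    # T_STRING: returned as-is, no method call
 *   "#{:sym}"   # frozen Symbol name via rb_sym2str
 *   "#{nil}"    # "" via rb_nil_to_s
 *   "#{42}"     # rb_fix_to_s fast path
 *
 * Any miss returns Qundef and the interpreter falls back to a regular
 * to_s call followed by anytostring.
 */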
6185
6186// The ZJIT implementation uses this C function
6187// and needs a non-static function to call
6188VALUE
6189rb_vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6190{
6191 return vm_objtostring(iseq, recv, cd);
6192}
6193
6194static VALUE
6195vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6196{
6197 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6198 return ary;
6199 }
6200 else {
6201 return Qundef;
6202 }
6203}
6204
6205static VALUE
6206vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6207{
6208 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6209 return hash;
6210 }
6211 else {
6212 return Qundef;
6213 }
6214}
6215
6216static VALUE
6217vm_opt_str_freeze(VALUE str, int bop, ID id)
6218{
6219 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6220 return str;
6221 }
6222 else {
6223 return Qundef;
6224 }
6225}
6226
6227/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6228#define id_cmp idCmp
6229
6230static VALUE
6231vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6232{
6233 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6234 return rb_ary_includes(ary, target);
6235 }
6236 else {
6237 VALUE args[1] = {target};
6238
6239 // duparray
6240 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6241 VALUE dupary = rb_ary_resurrect(ary);
6242
6243 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6244 }
6245}
6246
6247VALUE
6248rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6249{
6250 return vm_opt_duparray_include_p(ec, ary, target);
6251}
6252
6253static VALUE
6254vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6255{
6256 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6257 if (num == 0) {
6258 return Qnil;
6259 }
6260 else {
6261 VALUE result = *ptr;
6262 rb_snum_t i = num - 1;
6263 while (i-- > 0) {
6264 const VALUE v = *++ptr;
6265 if (OPTIMIZED_CMP(v, result) > 0) {
6266 result = v;
6267 }
6268 }
6269 return result;
6270 }
6271 }
6272 else {
6273 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6274 }
6275}
6276
6277VALUE
6278rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6279{
6280 return vm_opt_newarray_max(ec, num, ptr);
6281}
6282
6283static VALUE
6284vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6285{
6286 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6287 if (num == 0) {
6288 return Qnil;
6289 }
6290 else {
6291 VALUE result = *ptr;
6292 rb_snum_t i = num - 1;
6293 while (i-- > 0) {
6294 const VALUE v = *++ptr;
6295 if (OPTIMIZED_CMP(v, result) < 0) {
6296 result = v;
6297 }
6298 }
6299 return result;
6300 }
6301 }
6302 else {
6303 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6304 }
6305}
6306
6307VALUE
6308rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6309{
6310 return vm_opt_newarray_min(ec, num, ptr);
6311}
6312
6313static VALUE
6314vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6315{
6316 // If Array#hash is _not_ monkeypatched, use the optimized call
6317 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6318 return rb_ary_hash_values(num, ptr);
6319 }
6320 else {
6321 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6322 }
6323}
6324
6325VALUE
6326rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6327{
6328 return vm_opt_newarray_hash(ec, num, ptr);
6329}
6330
6331VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6332VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6333
6334static VALUE
6335vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6336{
6337 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6338 struct RArray fake_ary;
6339 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6340 return rb_ary_includes(ary, target);
6341 }
6342 else {
6343 VALUE args[1] = {target};
6344 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6345 }
6346}
6347
6348VALUE
6349rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6350{
6351 return vm_opt_newarray_include_p(ec, num, ptr, target);
6352}
6353
6354static VALUE
6355vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6356{
6357 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6358 struct RArray fake_ary;
6359 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6360 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6361 }
6362 else {
6363 // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
6364 // Set up an args array with room for the keyword hash.
6365 VALUE args[2];
6366 args[0] = fmt;
6367 int kw_splat = RB_NO_KEYWORDS;
6368 int argc = 1;
6369
6370 if (!UNDEF_P(buffer)) {
6371 args[1] = rb_hash_new_with_size(1);
6372 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6373 kw_splat = RB_PASS_KEYWORDS;
6374 argc++;
6375 }
6376
6377 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6378 }
6379}
6380
6381VALUE
6382rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6383{
6384 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6385}
6386
6387VALUE
6388rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6389{
6390 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6391}
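/* Usage sketch: for a literal receiver the compiler can emit
 * opt_newarray_send instead of materializing the array, so
 *
 *   [a, b].pack("C*")               # -> rb_vm_opt_newarray_pack
 *   [a, b].pack("C*", buffer: buf)  # -> rb_vm_opt_newarray_pack_buffer
 *
 * operate on the elements still on the VM stack (num/ptr). The fake
 * RArray avoids allocating a real Array unless Array#pack was redefined.
 */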
6392
6393#undef id_cmp
6394
6395static void
6396vm_track_constant_cache(ID id, void *ic)
6397{
6398 rb_vm_t *vm = GET_VM();
6399 struct rb_id_table *const_cache = vm->constant_cache;
6400 VALUE lookup_result;
6401 set_table *ics;
6402
6403 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6404 ics = (set_table *)lookup_result;
6405 }
6406 else {
6407 ics = set_init_numtable();
6408 rb_id_table_insert(const_cache, id, (VALUE)ics);
6409 }
6410
6411 /* The call below to set_insert could allocate, which could trigger a GC.
6412 * If it triggers a GC, it may free an iseq that also holds a cache to this
6413 * constant. If that iseq is the last iseq with a cache to this constant,
6414 * then it will free this set table, which would cause a use-after-free
6415 * during this set_insert.
6416 *
6417 * To fix this issue, we store the ID that is currently being inserted
6418 * and, in remove_from_constant_cache, we don't free the set table for the
6419 * ID equal to this one.
6420 *
6421 * See [Bug #20921].
6422 */
6423 vm->inserting_constant_cache_id = id;
6424
6425 set_insert(ics, (st_data_t)ic);
6426
6427 vm->inserting_constant_cache_id = (ID)0;
6428}
6429
6430static void
6431vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6432{
6433 RB_VM_LOCKING() {
6434 for (int i = 0; segments[i]; i++) {
6435 ID id = segments[i];
6436 if (id == idNULL) continue;
6437 vm_track_constant_cache(id, ic);
6438 }
6439 }
6440}
6441
6442// For JIT inlining
6443static inline bool
6444vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6445{
6446 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6447 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6448
6449 return (ic_cref == NULL || // no need to check CREF
6450 ic_cref == vm_get_cref(reg_ep));
6451 }
6452 return false;
6453}
6454
6455static bool
6456vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6457{
6458 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6459 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6460}
6461
6462// YJIT needs this function to never allocate and never raise
6463bool
6464rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6465{
6466 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6467}
6468
6469static void
6470vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6471{
6472 if (ruby_vm_const_missing_count > 0) {
6473 ruby_vm_const_missing_count = 0;
6474 ic->entry = NULL;
6475 return;
6476 }
6477
6478 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6479 RB_OBJ_WRITE(ice, &ice->value, val);
6480 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6481 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6482 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6483
6484 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6485 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6486 rb_yjit_constant_ic_update(iseq, ic, pos);
6487}
6488
6489VALUE
6490rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6491{
6492 VALUE val;
6493 const ID *segments = ic->segments;
6494 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6495 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6496 val = ice->value;
6497
6498 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6499 }
6500 else {
6501 ruby_vm_constant_cache_misses++;
6502 val = vm_get_ev_const_chain(ec, segments);
6503 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6504 // Undo the PC increment to get the address of this instruction
6505 // INSN_ATTR(width) == 2
6506 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6507 }
6508 return val;
6509}
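/* Sketch of a cached constant lookup: `Foo::Bar` compiles to one
 * opt_getconstant_path insn whose IC holds the segments {idFoo, idBar, 0}.
 * On a hit the cached value is returned directly; on a miss the chain is
 * resolved, each segment ID is registered in vm->constant_cache (so that
 * redefining Foo or Foo::Bar can invalidate this IC), and vm_ic_update
 * refills the entry.
 */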
6510
6511static VALUE
6512vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6513{
6514 rb_thread_t *th = rb_ec_thread_ptr(ec);
6515 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6516
6517 again:
6518 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6519 return is->once.value;
6520 }
6521 else if (is->once.running_thread == NULL) {
6522 VALUE val;
6523 is->once.running_thread = th;
6524 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6525 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6526 /* is->once.running_thread is cleared by vm_once_clear() */
6527 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6528 return val;
6529 }
6530 else if (is->once.running_thread == th) {
6531 /* recursive once */
6532 return vm_once_exec((VALUE)iseq);
6533 }
6534 else {
6535 /* waiting for finish */
6536 RUBY_VM_CHECK_INTS(ec);
6537 rb_thread_schedule();
6538 goto again;
6539 }
6540}
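/* The once dispatch backs the `once` insn, used e.g. for the /o regexp
 * flag, where interpolation must run at most once per call site:
 *
 *   PAT = "ab"
 *   def scan(s) = s =~ /#{PAT}\d+/o   # interpolated regexp built once
 *
 * running_thread goes NULL -> current thread -> ONCE_DONE. Other threads
 * spin with interrupt checks and a thread yield until the first
 * evaluation finishes; re-entry on the same thread re-executes the block
 * instead of deadlocking on itself.
 */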
6541
6542static OFFSET
6543vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6544{
6545 switch (OBJ_BUILTIN_TYPE(key)) {
6546 case -1:
6547 case T_FLOAT:
6548 case T_SYMBOL:
6549 case T_BIGNUM:
6550 case T_STRING:
6551 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6552 SYMBOL_REDEFINED_OP_FLAG |
6553 INTEGER_REDEFINED_OP_FLAG |
6554 FLOAT_REDEFINED_OP_FLAG |
6555 NIL_REDEFINED_OP_FLAG |
6556 TRUE_REDEFINED_OP_FLAG |
6557 FALSE_REDEFINED_OP_FLAG |
6558 STRING_REDEFINED_OP_FLAG)) {
6559 st_data_t val;
6560 if (RB_FLOAT_TYPE_P(key)) {
6561 double kval = RFLOAT_VALUE(key);
6562 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6563 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6564 }
6565 }
6566 if (rb_hash_stlike_lookup(hash, key, &val)) {
6567 return FIX2LONG((VALUE)val);
6568 }
6569 else {
6570 return else_offset;
6571 }
6572 }
6573 }
6574 return 0;
6575}
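/* Sketch: a `case` whose `when` clauses are all dispatchable literals
 * (Strings, Symbols, Integers, Floats, nil, true, false) compiles to
 * opt_case_dispatch with a CDHASH of literal => branch offset. The modf
 * normalization above lets a Float key with an integral value (e.g. 1.0)
 * hit the entry stored for the Integer literal 1, preserving the
 * `1 === 1.0` semantics of the slow path.
 */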
6576
6577NORETURN(static void
6578 vm_stack_consistency_error(const rb_execution_context_t *ec,
6579 const rb_control_frame_t *,
6580 const VALUE *));
6581static void
6582vm_stack_consistency_error(const rb_execution_context_t *ec,
6583 const rb_control_frame_t *cfp,
6584 const VALUE *bp)
6585{
6586 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6587 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6588 static const char stack_consistency_error[] =
6589 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6590#if defined RUBY_DEVEL
6591 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6592 rb_str_cat_cstr(mesg, "\n");
6593 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6594 rb_bug("%s", RSTRING_PTR(mesg));
6595#else
6596 rb_bug(stack_consistency_error, nsp, nbp);
6597#endif
6598}
6599
6600static VALUE
6601vm_opt_plus(VALUE recv, VALUE obj)
6602{
6603 if (FIXNUM_2_P(recv, obj) &&
6604 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6605 return rb_fix_plus_fix(recv, obj);
6606 }
6607 else if (FLONUM_2_P(recv, obj) &&
6608 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6609 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6610 }
6611 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6612 return Qundef;
6613 }
6614 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6615 RBASIC_CLASS(obj) == rb_cFloat &&
6616 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6617 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6618 }
6619 else if (RBASIC_CLASS(recv) == rb_cString &&
6620 RBASIC_CLASS(obj) == rb_cString &&
6621 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6622 return rb_str_opt_plus(recv, obj);
6623 }
6624 else if (RBASIC_CLASS(recv) == rb_cArray &&
6625 RBASIC_CLASS(obj) == rb_cArray &&
6626 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6627 return rb_ary_plus(recv, obj);
6628 }
6629 else {
6630 return Qundef;
6631 }
6632}
6633
6634static VALUE
6635vm_opt_minus(VALUE recv, VALUE obj)
6636{
6637 if (FIXNUM_2_P(recv, obj) &&
6638 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6639 return rb_fix_minus_fix(recv, obj);
6640 }
6641 else if (FLONUM_2_P(recv, obj) &&
6642 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6643 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6644 }
6645 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6646 return Qundef;
6647 }
6648 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6649 RBASIC_CLASS(obj) == rb_cFloat &&
6650 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6651 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6652 }
6653 else {
6654 return Qundef;
6655 }
6656}
6657
6658static VALUE
6659vm_opt_mult(VALUE recv, VALUE obj)
6660{
6661 if (FIXNUM_2_P(recv, obj) &&
6662 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6663 return rb_fix_mul_fix(recv, obj);
6664 }
6665 else if (FLONUM_2_P(recv, obj) &&
6666 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6667 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6668 }
6669 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6670 return Qundef;
6671 }
6672 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6673 RBASIC_CLASS(obj) == rb_cFloat &&
6674 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6675 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6676 }
6677 else {
6678 return Qundef;
6679 }
6680}
6681
6682static VALUE
6683vm_opt_div(VALUE recv, VALUE obj)
6684{
6685 if (FIXNUM_2_P(recv, obj) &&
6686 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6687 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6688 }
6689 else if (FLONUM_2_P(recv, obj) &&
6690 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6691 return rb_flo_div_flo(recv, obj);
6692 }
6693 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6694 return Qundef;
6695 }
6696 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6697 RBASIC_CLASS(obj) == rb_cFloat &&
6698 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6699 return rb_flo_div_flo(recv, obj);
6700 }
6701 else {
6702 return Qundef;
6703 }
6704}
6705
6706static VALUE
6707vm_opt_mod(VALUE recv, VALUE obj)
6708{
6709 if (FIXNUM_2_P(recv, obj) &&
6710 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6711 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6712 }
6713 else if (FLONUM_2_P(recv, obj) &&
6714 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6715 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6716 }
6717 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6718 return Qundef;
6719 }
6720 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6721 RBASIC_CLASS(obj) == rb_cFloat &&
6722 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6723 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6724 }
6725 else {
6726 return Qundef;
6727 }
6728}
6729
6730static VALUE
6731vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6732{
6733 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6734 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6735
6736 if (!UNDEF_P(val)) {
6737 return RBOOL(!RTEST(val));
6738 }
6739 }
6740
6741 return Qundef;
6742}
6743
6744static VALUE
6745vm_opt_lt(VALUE recv, VALUE obj)
6746{
6747 if (FIXNUM_2_P(recv, obj) &&
6748 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6749 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6750 }
6751 else if (FLONUM_2_P(recv, obj) &&
6752 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6753 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6754 }
6755 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6756 return Qundef;
6757 }
6758 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6759 RBASIC_CLASS(obj) == rb_cFloat &&
6760 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6761 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6762 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6763 }
6764 else {
6765 return Qundef;
6766 }
6767}
6768
6769static VALUE
6770vm_opt_le(VALUE recv, VALUE obj)
6771{
6772 if (FIXNUM_2_P(recv, obj) &&
6773 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6774 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6775 }
6776 else if (FLONUM_2_P(recv, obj) &&
6777 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6778 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6779 }
6780 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6781 return Qundef;
6782 }
6783 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6784 RBASIC_CLASS(obj) == rb_cFloat &&
6785 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6786 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6787 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6788 }
6789 else {
6790 return Qundef;
6791 }
6792}
6793
6794static VALUE
6795vm_opt_gt(VALUE recv, VALUE obj)
6796{
6797 if (FIXNUM_2_P(recv, obj) &&
6798 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6799 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6800 }
6801 else if (FLONUM_2_P(recv, obj) &&
6802 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6803 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6804 }
6805 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6806 return Qundef;
6807 }
6808 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6809 RBASIC_CLASS(obj) == rb_cFloat &&
6810 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6811 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6812 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6813 }
6814 else {
6815 return Qundef;
6816 }
6817}
6818
6819static VALUE
6820vm_opt_ge(VALUE recv, VALUE obj)
6821{
6822 if (FIXNUM_2_P(recv, obj) &&
6823 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6824 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6825 }
6826 else if (FLONUM_2_P(recv, obj) &&
6827 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6828 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6829 }
6830 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6831 return Qundef;
6832 }
6833 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6834 RBASIC_CLASS(obj) == rb_cFloat &&
6835 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6836 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6837 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6838 }
6839 else {
6840 return Qundef;
6841 }
6842}
6843
6844
6845static VALUE
6846vm_opt_ltlt(VALUE recv, VALUE obj)
6847{
6848 if (SPECIAL_CONST_P(recv)) {
6849 return Qundef;
6850 }
6851 else if (RBASIC_CLASS(recv) == rb_cString &&
6852 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6853 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6854 return rb_str_buf_append(recv, obj);
6855 }
6856 else {
6857 return rb_str_concat(recv, obj);
6858 }
6859 }
6860 else if (RBASIC_CLASS(recv) == rb_cArray &&
6861 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6862 return rb_ary_push(recv, obj);
6863 }
6864 else {
6865 return Qundef;
6866 }
6867}
6868
6869static VALUE
6870vm_opt_and(VALUE recv, VALUE obj)
6871{
6872 // If recv and obj are both fixnums, then the bottom tag bit
6873 // will be 1 on both. 1 & 1 == 1, so the result value will also
6874 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6875 // will be 0, and we return Qundef.
6876 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6877
6878 if (FIXNUM_P(ret) &&
6879 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6880 return ret;
6881 }
6882 else {
6883 return Qundef;
6884 }
6885}
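/* A worked example of the tag trick, assuming the usual Fixnum tagging
 * (VALUE = 2n + 1):
 *
 *   recv = 6  ->  0b1101 (13)
 *   obj  = 3  ->  0b0111 (7)
 *   13 & 7 = 5 = 0b0101: tag bit still set, FIX2LONG(5) == 2 == (6 & 3)
 *
 * In general (2a+1) & (2b+1) == 2*(a&b) + 1, so the tagged AND is already
 * the tagged result and no untag/retag is needed.
 */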
6886
6887static VALUE
6888vm_opt_or(VALUE recv, VALUE obj)
6889{
6890 if (FIXNUM_2_P(recv, obj) &&
6891 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6892 return recv | obj;
6893 }
6894 else {
6895 return Qundef;
6896 }
6897}
6898
6899static VALUE
6900vm_opt_aref(VALUE recv, VALUE obj)
6901{
6902 if (SPECIAL_CONST_P(recv)) {
6903 if (FIXNUM_2_P(recv, obj) &&
6904 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6905 return rb_fix_aref(recv, obj);
6906 }
6907 return Qundef;
6908 }
6909 else if (RBASIC_CLASS(recv) == rb_cArray &&
6910 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6911 if (FIXNUM_P(obj)) {
6912 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6913 }
6914 else {
6915 return rb_ary_aref1(recv, obj);
6916 }
6917 }
6918 else if (RBASIC_CLASS(recv) == rb_cHash &&
6919 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6920 return rb_hash_aref(recv, obj);
6921 }
6922 else {
6923 return Qundef;
6924 }
6925}
6926
6927static VALUE
6928vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6929{
6930 if (SPECIAL_CONST_P(recv)) {
6931 return Qundef;
6932 }
6933 else if (RBASIC_CLASS(recv) == rb_cArray &&
6934 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6935 FIXNUM_P(obj)) {
6936 rb_ary_store(recv, FIX2LONG(obj), set);
6937 return set;
6938 }
6939 else if (RBASIC_CLASS(recv) == rb_cHash &&
6940 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6941 rb_hash_aset(recv, obj, set);
6942 return set;
6943 }
6944 else {
6945 return Qundef;
6946 }
6947}
6948
6949static VALUE
6950vm_opt_length(VALUE recv, int bop)
6951{
6952 if (SPECIAL_CONST_P(recv)) {
6953 return Qundef;
6954 }
6955 else if (RBASIC_CLASS(recv) == rb_cString &&
6956 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6957 if (bop == BOP_EMPTY_P) {
6958 return LONG2NUM(RSTRING_LEN(recv));
6959 }
6960 else {
6961 return rb_str_length(recv);
6962 }
6963 }
6964 else if (RBASIC_CLASS(recv) == rb_cArray &&
6965 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6966 return LONG2NUM(RARRAY_LEN(recv));
6967 }
6968 else if (RBASIC_CLASS(recv) == rb_cHash &&
6969 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6970 return INT2FIX(RHASH_SIZE(recv));
6971 }
6972 else {
6973 return Qundef;
6974 }
6975}
6976
6977static VALUE
6978vm_opt_empty_p(VALUE recv)
6979{
6980 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6981 case Qundef: return Qundef;
6982 case INT2FIX(0): return Qtrue;
6983 default: return Qfalse;
6984 }
6985}
6986
6987VALUE rb_false(VALUE obj);
6988
6989static VALUE
6990vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6991{
6992 if (NIL_P(recv) &&
6993 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6994 return Qtrue;
6995 }
6996 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6997 return Qfalse;
6998 }
6999 else {
7000 return Qundef;
7001 }
7002}
7003
7004static VALUE
7005fix_succ(VALUE x)
7006{
7007 switch (x) {
7008 case ~0UL:
7009 /* 0xFFFF_FFFF == INT2FIX(-1)
7010 * `-1.succ` is of course 0. */
7011 return INT2FIX(0);
7012 case RSHIFT(~0UL, 1):
7013 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7014 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7015 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7016 default:
7017 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7018 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7019 * == lx*2 + ly*2 + 1
7020 * == (lx*2+1) + (ly*2+1) - 1
7021 * == x + y - 1
7022 *
7023 * Here, if we put y := INT2FIX(1):
7024 *
7025 * == x + INT2FIX(1) - 1
7026 * == x + 2 .
7027 */
7028 return x + 2;
7029 }
7030}
7031
7032static VALUE
7033vm_opt_succ(VALUE recv)
7034{
7035 if (FIXNUM_P(recv) &&
7036 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7037 return fix_succ(recv);
7038 }
7039 else if (SPECIAL_CONST_P(recv)) {
7040 return Qundef;
7041 }
7042 else if (RBASIC_CLASS(recv) == rb_cString &&
7043 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7044 return rb_str_succ(recv);
7045 }
7046 else {
7047 return Qundef;
7048 }
7049}
7050
7051static VALUE
7052vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7053{
7054 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7055 return RBOOL(!RTEST(recv));
7056 }
7057 else {
7058 return Qundef;
7059 }
7060}
7061
7062static VALUE
7063vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7064{
7065 if (SPECIAL_CONST_P(recv)) {
7066 return Qundef;
7067 }
7068 else if (RBASIC_CLASS(recv) == rb_cString &&
7069 CLASS_OF(obj) == rb_cRegexp &&
7070 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7071 return rb_reg_match(obj, recv);
7072 }
7073 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7074 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7075 return rb_reg_match(recv, obj);
7076 }
7077 else {
7078 return Qundef;
7079 }
7080}
7081
7082rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7083
7084NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7085
7086static inline void
7087vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7088 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7089 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7090{
7091 rb_event_flag_t event = pc_events & target_event;
7092 VALUE self = GET_SELF();
7093
7094 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7095
7096 if (event & global_hooks->events) {
7097 /* increment PC because source line is calculated with PC-1 */
7098 reg_cfp->pc++;
7099 vm_dtrace(event, ec);
7100 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7101 reg_cfp->pc--;
7102 }
7103
7104 // Load here since global hook above can add and free local hooks
7105 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7106 if (local_hooks != NULL) {
7107 if (event & local_hooks->events) {
7108 /* increment PC because source line is calculated with PC-1 */
7109 reg_cfp->pc++;
7110 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7111 reg_cfp->pc--;
7112 }
7113 }
7114}
7115
7116#define VM_TRACE_HOOK(target_event, val) do { \
7117 if ((pc_events & (target_event)) & enabled_flags) { \
7118 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7119 } \
7120} while (0)
7121
7122static VALUE
7123rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7124{
7125 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7126 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7127 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7128}
7129
7130static void
7131vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7132{
7133 const VALUE *pc = reg_cfp->pc;
7134 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7135 rb_event_flag_t global_events = enabled_flags;
7136
7137 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7138 return;
7139 }
7140 else {
7141 const rb_iseq_t *iseq = reg_cfp->iseq;
7142 VALUE iseq_val = (VALUE)iseq;
7143 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7144 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7145 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7146 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7147 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7148 rb_hook_list_t *bmethod_local_hooks = NULL;
7149 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7150 rb_event_flag_t bmethod_local_events = 0;
7151 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7152 enabled_flags |= iseq_local_events;
7153
7154 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7155
7156 if (bmethod_frame) {
7157 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7158 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7159 bmethod_local_hooks = me->def->body.bmethod.hooks;
7160 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7161 if (bmethod_local_hooks) {
7162 bmethod_local_events = bmethod_local_hooks->events;
7163 }
7164 }
7165
7166
7167 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7168#if 0
7169 /* disable trace */
7170 /* TODO: incomplete */
7171 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7172#else
7173 /* do not disable trace because of performance problem
7174 * (re-enable overhead)
7175 */
7176#endif
7177 return;
7178 }
7179 else if (ec->trace_arg != NULL) {
7180 /* already tracing */
7181 return;
7182 }
7183 else {
7184 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7185 /* Note, not considering iseq local events here since the same
7186 * iseq could be used in multiple bmethods. */
7187 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7188
7189 if (0) {
7190 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7191 (int)pos,
7192 (int)pc_events,
7193 RSTRING_PTR(rb_iseq_path(iseq)),
7194 (int)rb_iseq_line_no(iseq, pos),
7195 RSTRING_PTR(rb_iseq_label(iseq)));
7196 }
7197 VM_ASSERT(reg_cfp->pc == pc);
7198 VM_ASSERT(pc_events != 0);
7199
7200 /* check traces */
7201 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7202 /* b_call instruction running as a method. Fire call event. */
7203 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7204 }
7205 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7206 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7207 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7208 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7209 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7210 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7211 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7212 /* b_return instruction running as a method. Fire return event. */
7213 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7214 }
7215
7216 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7217 // We need the pointer to stay valid in case compaction happens in a trace hook.
7218 //
7219 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7220 // storage for `rb_method_definition_t` is not on the GC heap.
7221 RB_GC_GUARD(iseq_val);
7222 }
7223 }
7224}
7225#undef VM_TRACE_HOOK
7226
7227#if VM_CHECK_MODE > 0
7228NORETURN( NOINLINE( COLDFUNC
7229void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7230
7231void
7232Init_vm_stack_canary(void)
7233{
7234 /* This has to be called _after_ our PRNG is properly set up. */
7235 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7236 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7237
7238 vm_stack_canary_was_born = true;
7239 VM_ASSERT(n == 0);
7240}
7241
7242void
7243rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7244{
7245 /* Because a method has already been called, why not call
7246 * another one. */
7247 const char *insn = rb_insns_name(i);
7248 VALUE inspection = rb_inspect(c);
7249 const char *str = StringValueCStr(inspection);
7250
7251 rb_bug("dead canary found at %s: %s", insn, str);
7252}
7253
7254#else
7255void Init_vm_stack_canary(void) { /* nothing to do */ }
7256#endif
7257
7258
7259/* a part of the following code is generated by this ruby script:
7260
726116.times{|i|
7262 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7263 typedef_args.prepend(", ") if i != 0
7264 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7265 call_args.prepend(", ") if i != 0
7266 puts %Q{
7267static VALUE
7268builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7269{
7270 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7271 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7272}}
7273}
7274
7275puts
7276puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
727716.times{|i|
7278 puts " builtin_invoker#{i},"
7279}
7280puts "};"
7281*/
7282
7283static VALUE
7284builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7285{
7286 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7287 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7288}
7289
7290static VALUE
7291builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7292{
7293 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7294 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7295}
7296
7297static VALUE
7298builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7299{
7300 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7301 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7302}
7303
7304static VALUE
7305builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7306{
7307 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7308 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7309}
7310
7311static VALUE
7312builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7313{
7314 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7315 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7316}
7317
7318static VALUE
7319builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7320{
7321 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7322 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7323}
7324
7325static VALUE
7326builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7327{
7328 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7329 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7330}
7331
7332static VALUE
7333builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7334{
7335 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7336 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7337}
7338
7339static VALUE
7340builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7341{
7342 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7343 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7344}
7345
7346static VALUE
7347builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7348{
7349 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7350 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7351}
7352
7353static VALUE
7354builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7355{
7356 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7357 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7358}
7359
7360static VALUE
7361builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7362{
7363 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7364 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7365}
7366
7367static VALUE
7368builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7369{
7370 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7371 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7372}
7373
7374static VALUE
7375builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7376{
7377 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7378 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7379}
7380
7381static VALUE
7382builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7383{
7384 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7385 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7386}
7387
7388static VALUE
7389builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7390{
7391 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7392 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7393}
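/* Each builtin_invokerN above casts the type-erased funcptr back to its
 * arity-specific signature and spreads argv positionally.  A builtin
 * registered with argc == 2 therefore has the shape sketched below
 * (my_builtin_add is a hypothetical example, not part of this file):
 *
 *   static VALUE
 *   my_builtin_add(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b)
 *   {
 *       // assuming Fixnum arguments, purely for brevity of the sketch
 *       return LONG2FIX(FIX2LONG(a) + FIX2LONG(b));
 *   }
 *
 * builtin_invoker2() then reaches it as
 *   (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
 */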
7394
7395typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7396
7397static builtin_invoker
7398lookup_builtin_invoker(int argc)
7399{
7400 static const builtin_invoker invokers[] = {
7401 builtin_invoker0,
7402 builtin_invoker1,
7403 builtin_invoker2,
7404 builtin_invoker3,
7405 builtin_invoker4,
7406 builtin_invoker5,
7407 builtin_invoker6,
7408 builtin_invoker7,
7409 builtin_invoker8,
7410 builtin_invoker9,
7411 builtin_invoker10,
7412 builtin_invoker11,
7413 builtin_invoker12,
7414 builtin_invoker13,
7415 builtin_invoker14,
7416 builtin_invoker15,
7417 };
7418
7419 return invokers[argc];
7420}
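/* Dispatch is one table load plus an indirect call.  Note there is no
 * bounds check: callers are expected to pass 0 <= argc <= 15, matching the
 * sixteen-entry table generated above.  A minimal usage sketch:
 *
 *   builtin_invoker inv = lookup_builtin_invoker(bf->argc);
 *   VALUE ret = (*inv)(ec, self, argv, (rb_insn_func_t)(uintptr_t)bf->func_ptr);
 */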
7421
7422static inline VALUE
7423invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7424{
7425 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7426 SETUP_CANARY(canary_p);
7427 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7428 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7429 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7430 return ret;
7431}
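/* SETUP_CANARY/CHECK_CANARY guard the `Primitive.attr! :leaf` contract:
 * when the enclosing iseq is declared leaf, a sentinel is written at the
 * stack pointer before the call and verified afterwards, so a "leaf"
 * builtin that actually pushed a frame or otherwise moved the VM stack is
 * reported immediately instead of corrupting the stack silently. */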
7432
7433static VALUE
7434vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7435{
7436 return invoke_bf(ec, cfp, bf, argv);
7437}
7438
7439static VALUE
7440vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7441{
7442 if (0) { // debug print
7443 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7444 for (int i=0; i<bf->argc; i++) {
7445 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7446 }
7447 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7448 (void *)(uintptr_t)bf->func_ptr);
7449 }
7450
7451 if (bf->argc == 0) {
7452 return invoke_bf(ec, cfp, bf, NULL);
7453 }
7454 else {
7455 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7456 return invoke_bf(ec, cfp, bf, argv);
7457 }
7458}
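/* Locals sit directly below the VM_ENV_DATA_SIZE environment slots ending
 * at cfp->ep, so local index i lives at
 *   ep - local_table_size - VM_ENV_DATA_SIZE + 1 + i
 * Worked example (VM_ENV_DATA_SIZE == 3): with local_table_size == 4 and
 * start_index == 1, argv == ep - 5, the slot of the second local; the
 * builtin then receives bf->argc consecutive locals starting there. */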
7459
7460// for __builtin_inline!()
7461
7462VALUE
7463rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7464{
7465 const rb_control_frame_t *cfp = ec->cfp;
7466 return cfp->ep[index];
7467}
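/* Code generated for `__builtin_inline!` reads Ruby locals of the current
 * frame through this helper; the emitted C is expected to look roughly
 * like the hypothetical line below, with the index being the ep-relative
 * slot (negative for locals of the current frame) computed at build time:
 *
 *   VALUE x = rb_vm_lvar_exposed(ec, index_of_x);
 */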
7468