Ruby 3.5.0dev (2025-06-27 revision 4965954556b1db71fba6ce090cc217e97641687e)
vm_insnhelper.c (4965954556b1db71fba6ce090cc217e97641687e)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
50static VALUE
51ruby_vm_special_exception_copy(VALUE exc)
52{
53 VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
54 rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
58NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
59static void
60ec_stack_overflow(rb_execution_context_t *ec, int setup)
61{
62 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
63 ec->raised_flag = RAISED_STACKOVERFLOW;
64 if (setup) {
65 VALUE at = rb_ec_backtrace_object(ec);
66 mesg = ruby_vm_special_exception_copy(mesg);
67 rb_ivar_set(mesg, idBt, at);
68 rb_ivar_set(mesg, idBt_locations, at);
69 }
70 ec->errinfo = mesg;
71 EC_JUMP_TAG(ec, TAG_RAISE);
72}
73
74NORETURN(static void vm_stackoverflow(void));
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
82void
83rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
84{
85 if (rb_during_gc()) {
86 rb_bug("system stack overflow during GC. Faulty native extension?");
87 }
88 if (crit >= rb_stack_overflow_fatal) {
89 ec->raised_flag = RAISED_STACKOVERFLOW;
90 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91 EC_JUMP_TAG(ec, TAG_RAISE);
92 }
93 ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
94}
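/* In short: crit >= rb_stack_overflow_fatal raises the preallocated
 * "fatal machine stack overflow" exception without any setup, while lower
 * levels go through ec_stack_overflow(); the backtrace is materialized
 * (setup == TRUE) only when not in signal context, i.e. when
 * crit < rb_stack_overflow_signal. */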
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
99static int
100callable_class_p(VALUE klass)
101{
102#if VM_CHECK_MODE >= 2
103 if (!klass) return FALSE;
104 switch (RB_BUILTIN_TYPE(klass)) {
105 default:
106 break;
107 case T_ICLASS:
108 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
109 case T_MODULE:
110 return TRUE;
111 }
112 while (klass) {
113 if (klass == rb_cBasicObject) {
114 return TRUE;
115 }
116 klass = RCLASS_SUPER(klass);
117 }
118 return FALSE;
119#else
120 return klass != 0;
121#endif
122}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
142static void
143vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
144{
145 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
146 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
147
148 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
149 cref_or_me_type = imemo_type(cref_or_me);
150 }
151 if (type & VM_FRAME_FLAG_BMETHOD) {
152 req_me = TRUE;
153 }
154
155 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
156 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
157 }
158 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
159 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
160 }
161
162 if (req_me) {
163 if (cref_or_me_type != imemo_ment) {
164 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
165 }
166 }
167 else {
168 if (req_cref && cref_or_me_type != imemo_cref) {
169 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
170 }
171 else { /* cref or Qfalse */
172 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
173 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
174 /* ignore */
175 }
176 else {
177 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
178 }
179 }
180 }
181 }
182
183 if (cref_or_me_type == imemo_ment) {
184 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
185
186 if (!callable_method_entry_p(me)) {
187 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
188 }
189 }
190
191 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
192 VM_ASSERT(iseq == NULL ||
193 RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
194 RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
195 );
196 }
197 else {
198 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
199 }
200}
201
202static void
203vm_check_frame(VALUE type,
204 VALUE specval,
205 VALUE cref_or_me,
206 const rb_iseq_t *iseq)
207{
208 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
209 VM_ASSERT(FIXNUM_P(type));
210
211#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
212 case magic: \
213 vm_check_frame_detail(type, req_block, req_me, req_cref, \
214 specval, cref_or_me, is_cframe, iseq); \
215 break
216 switch (given_magic) {
217 /* BLK ME CREF CFRAME */
218 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
219 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
220 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
221 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
222 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
223 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
224 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
225 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
226 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
227 default:
228 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
229 }
230#undef CHECK
231}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
252
253void
254rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
255{
256 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
257 const struct rb_iseq_struct *iseq;
258
259 if (! LIKELY(vm_stack_canary_was_born)) {
260 return; /* :FIXME: isn't it rather fatal to enter this branch? */
261 }
262 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
263 /* This is at the very beginning of a thread. cfp does not exist. */
264 return;
265 }
266 else if (! (iseq = GET_ISEQ())) {
267 return;
268 }
269 else if (LIKELY(sp[0] != vm_stack_canary)) {
270 return;
271 }
272 else {
273 /* we are going to call methods below; squash the canary to
274 * prevent an infinite loop. */
275 sp[0] = Qundef;
276 }
277
278 const VALUE *orig = rb_iseq_original_iseq(iseq);
279 const VALUE iseqw = rb_iseqw_new(iseq);
280 const VALUE inspection = rb_inspect(iseqw);
281 const char *stri = rb_str_to_cstr(inspection);
282 const VALUE disasm = rb_iseq_disasm(iseq);
283 const char *strd = rb_str_to_cstr(disasm);
284 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
285 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
286 const char *name = insn_name(insn);
287
288 /* rb_bug() is not capable of outputting contents this large. It
289 is designed to run from a SIGSEGV handler, which tends to be
290 very restricted. */
291 ruby_debug_printf(
292 "We are killing the stack canary set by %s, "
293 "at %s@pc=%"PRIdPTR"\n"
294 "watch out for the C stack trace.\n"
295 "%s",
296 name, stri, pos, strd);
297 rb_bug("see above.");
298}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
305
306#if USE_DEBUG_COUNTER
307static void
308vm_push_frame_debug_counter_inc(
309 const struct rb_execution_context_struct *ec,
310 const struct rb_control_frame_struct *reg_cfp,
311 VALUE type)
312{
313 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
314
315 RB_DEBUG_COUNTER_INC(frame_push);
316
317 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
318 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
319 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
320 if (prev) {
321 if (curr) {
322 RB_DEBUG_COUNTER_INC(frame_R2R);
323 }
324 else {
325 RB_DEBUG_COUNTER_INC(frame_R2C);
326 }
327 }
328 else {
329 if (curr) {
330 RB_DEBUG_COUNTER_INC(frame_C2R);
331 }
332 else {
333 RB_DEBUG_COUNTER_INC(frame_C2C);
334 }
335 }
336 }
337
338 switch (type & VM_FRAME_MAGIC_MASK) {
339 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
340 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
341 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
342 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
343 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
344 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
345 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
346 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
347 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
348 }
349
350 rb_bug("unreachable");
351}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
356// Return a poison value to be set above the stack top to verify leafness.
357VALUE
358rb_vm_stack_canary(void)
359{
360#if VM_CHECK_MODE > 0
361 return vm_stack_canary;
362#else
363 return 0;
364#endif
365}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
371static void
372vm_push_frame(rb_execution_context_t *ec,
373 const rb_iseq_t *iseq,
374 VALUE type,
375 VALUE self,
376 VALUE specval,
377 VALUE cref_or_me,
378 const VALUE *pc,
379 VALUE *sp,
380 int local_size,
381 int stack_max)
382{
383 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
384
385 vm_check_frame(type, specval, cref_or_me, iseq);
386 VM_ASSERT(local_size >= 0);
387
388 /* check stack overflow */
389 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
390 vm_check_canary(ec, sp);
391
392 /* setup vm value stack */
393
394 /* initialize local variables */
395 for (int i=0; i < local_size; i++) {
396 *sp++ = Qnil;
397 }
398
399 /* setup ep with managing data */
400 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
401 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
402 *sp++ = type; /* ep[-0] / ENV_FLAGS */
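/* Resulting env layout (annotation): after the three writes above,
 *   ep[-2] (VM_ENV_DATA_INDEX_ME_CREF) = cref_or_me
 *   ep[-1] (VM_ENV_DATA_INDEX_SPECVAL) = specval
 *   ep[ 0] (VM_ENV_DATA_INDEX_FLAGS)   = type
 * with the local_size Qnil slots sitting just below, and cfp->ep set to
 * sp - 1 in the frame initializer that follows. */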
403
404 /* setup new frame */
405 *cfp = (const struct rb_control_frame_struct) {
406 .pc = pc,
407 .sp = sp,
408 .iseq = iseq,
409 .self = self,
410 .ep = sp - 1,
411 .block_code = NULL,
412#if VM_DEBUG_BP_CHECK
413 .bp_check = sp,
414#endif
415 .jit_return = NULL,
416 };
417
418 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
419 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
420 future/untested compilers/platforms. */
421
422 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
423 atomic_signal_fence(memory_order_seq_cst);
424 #endif
425
426 ec->cfp = cfp;
427
428 if (VMDEBUG == 2) {
429 SDR();
430 }
431 vm_push_frame_debug_counter_inc(ec, cfp, type);
432}
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
444/* return TRUE if the frame is finished */
445static inline int
446vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
447{
448 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
449
450 if (VMDEBUG == 2) SDR();
451
452 RUBY_VM_CHECK_INTS(ec);
453 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
454
455 return flags & VM_FRAME_FLAG_FINISH;
456}
457
458void
459rb_vm_pop_frame(rb_execution_context_t *ec)
460{
461 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
462}
463
464// Push a dummy (pseudo) frame whose iseq carries the given file name (fname).
465VALUE
466rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
467{
468 rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
469 rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);
470
471 vm_push_frame(ec,
472 dmy_iseq, //const rb_iseq_t *iseq,
473 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
474 ec->cfp->self, // VALUE self,
475 VM_BLOCK_HANDLER_NONE, // VALUE specval,
476 Qfalse, // VALUE cref_or_me,
477 NULL, // const VALUE *pc,
478 ec->cfp->sp, // VALUE *sp,
479 0, // int local_size,
480 0); // int stack_max
481
482 return (VALUE)dmy_iseq;
483}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
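/* Example messages produced above (for reference):
 *   rb_arity_error_new(1, 0, 0) => "wrong number of arguments (given 1, expected 0)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS)
 *                               => "wrong number of arguments (given 0, expected 1+)"
 *   rb_arity_error_new(4, 1, 3) => "wrong number of arguments (given 4, expected 1..3)"
 */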
502
503void
504rb_error_arity(int argc, int min, int max)
505{
506 rb_exc_raise(rb_arity_error_new(argc, min, max));
507}
508
509/* lvar */
510
511NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
512
513static void
514vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
515{
516 /* remember the env value forcibly */
517 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
518 VM_FORCE_WRITE(&ep[index], v);
519 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
520 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
521}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
535
536void
537rb_vm_env_write(const VALUE *ep, int index, VALUE v)
538{
539 vm_env_write(ep, index, v);
540}
541
542VALUE
543rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
544{
545 if (block_handler == VM_BLOCK_HANDLER_NONE) {
546 return Qnil;
547 }
548 else {
549 switch (vm_block_handler_type(block_handler)) {
550 case block_handler_type_iseq:
551 case block_handler_type_ifunc:
552 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
553 case block_handler_type_symbol:
554 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
555 case block_handler_type_proc:
556 return VM_BH_TO_PROC(block_handler);
557 default:
558 VM_UNREACHABLE(rb_vm_bh_to_procval);
559 }
560 }
561}
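/* Conversion summary: iseq/ifunc handlers are wrapped into a fresh Proc,
 * a Symbol handler goes through Symbol#to_proc, and a handler that is
 * already a Proc is returned as-is; no block at all yields Qnil. */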
562
563/* svar */
564
565#if VM_CHECK_MODE > 0
566static int
567vm_svar_valid_p(VALUE svar)
568{
569 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
570 switch (imemo_type(svar)) {
571 case imemo_svar:
572 case imemo_cref:
573 case imemo_ment:
574 return TRUE;
575 default:
576 break;
577 }
578 }
579 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
580 return FALSE;
581}
582#endif
583
584static inline struct vm_svar *
585lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
586{
587 VALUE svar;
588
589 if (lep && (ec == NULL || ec->root_lep != lep)) {
590 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
591 }
592 else {
593 svar = ec->root_svar;
594 }
595
596 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
597
598 return (struct vm_svar *)svar;
599}
600
601static inline void
602lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
603{
604 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
605
606 if (lep && (ec == NULL || ec->root_lep != lep)) {
607 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
608 }
609 else {
610 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
611 }
612}
613
614static VALUE
615lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
616{
617 const struct vm_svar *svar = lep_svar(ec, lep);
618
619 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
620
621 switch (key) {
622 case VM_SVAR_LASTLINE:
623 return svar->lastline;
624 case VM_SVAR_BACKREF:
625 return svar->backref;
626 default: {
627 const VALUE ary = svar->others;
628
629 if (NIL_P(ary)) {
630 return Qnil;
631 }
632 else {
633 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
634 }
635 }
636 }
637}
638
639static struct vm_svar *
640svar_new(VALUE obj)
641{
642 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
643 *((VALUE *)&svar->lastline) = Qnil;
644 *((VALUE *)&svar->backref) = Qnil;
645 *((VALUE *)&svar->others) = Qnil;
646
647 return svar;
648}
649
650static void
651lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
652{
653 struct vm_svar *svar = lep_svar(ec, lep);
654
655 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
656 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
657 }
658
659 switch (key) {
660 case VM_SVAR_LASTLINE:
661 RB_OBJ_WRITE(svar, &svar->lastline, val);
662 return;
663 case VM_SVAR_BACKREF:
664 RB_OBJ_WRITE(svar, &svar->backref, val);
665 return;
666 default: {
667 VALUE ary = svar->others;
668
669 if (NIL_P(ary)) {
670 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
671 }
672 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
673 }
674 }
675}
676
677static inline VALUE
678vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
679{
680 VALUE val;
681
682 if (type == 0) {
683 val = lep_svar_get(ec, lep, key);
684 }
685 else {
686 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
687
688 if (type & 0x01) {
689 switch (type >> 1) {
690 case '&':
691 val = rb_reg_last_match(backref);
692 break;
693 case '`':
694 val = rb_reg_match_pre(backref);
695 break;
696 case '\'':
697 val = rb_reg_match_post(backref);
698 break;
699 case '+':
700 val = rb_reg_match_last(backref);
701 break;
702 default:
703 rb_bug("unexpected back-ref");
704 }
705 }
706 else {
707 val = rb_reg_nth_match((int)(type >> 1), backref);
708 }
709 }
710 return val;
711}
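/* Encoding of (key, type), for reference: type == 0 reads a plain svar
 * slot (e.g. $_ via VM_SVAR_LASTLINE, $~ via VM_SVAR_BACKREF). Otherwise
 * the low bit of type selects the flavor: odd values carry a character in
 * the upper bits ($&, $`, $', $+), even values carry a group number, so
 * $1 is type 1<<1, $2 is 2<<1, and so on. */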
712
713static inline VALUE
714vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
715{
716 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
717 int nth = 0;
718
719 if (type & 0x01) {
720 switch (type >> 1) {
721 case '&':
722 case '`':
723 case '\'':
724 break;
725 case '+':
726 return rb_reg_last_defined(backref);
727 default:
728 rb_bug("unexpected back-ref");
729 }
730 }
731 else {
732 nth = (int)(type >> 1);
733 }
734 return rb_reg_nth_defined(nth, backref);
735}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
738static rb_callable_method_entry_t *
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758 rb_bug("check_method_entry: svar should not be there:");
759#endif
760 return NULL;
761 }
762}
763
764const rb_callable_method_entry_t *
765rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
766{
767 const VALUE *ep = cfp->ep;
768 rb_callable_method_entry_t *me;
769
770 while (!VM_ENV_LOCAL_P(ep)) {
771 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
772 ep = VM_ENV_PREV_EP(ep);
773 }
774
775 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
776}
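/* The walk above follows the block-to-method env chain: every non-local
 * env stores a cref/me (or Qfalse) at ep[-2], and VM_ENV_PREV_EP() hops
 * outward until the local env, where the me may also be tucked inside an
 * svar (hence can_be_svar == TRUE only for the last lookup). */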
777
778static const rb_iseq_t *
779method_entry_iseqptr(const rb_callable_method_entry_t *me)
780{
781 switch (me->def->type) {
782 case VM_METHOD_TYPE_ISEQ:
783 return me->def->body.iseq.iseqptr;
784 default:
785 return NULL;
786 }
787}
788
789static rb_cref_t *
790method_entry_cref(const rb_callable_method_entry_t *me)
791{
792 switch (me->def->type) {
793 case VM_METHOD_TYPE_ISEQ:
794 return me->def->body.iseq.cref;
795 default:
796 return NULL;
797 }
798}
799
800#if VM_CHECK_MODE == 0
801PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
802#endif
803static rb_cref_t *
804check_cref(VALUE obj, int can_be_svar)
805{
806 if (obj == Qfalse) return NULL;
807
808#if VM_CHECK_MODE > 0
809 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
810#endif
811
812 switch (imemo_type(obj)) {
813 case imemo_ment:
814 return method_entry_cref((rb_callable_method_entry_t *)obj);
815 case imemo_cref:
816 return (rb_cref_t *)obj;
817 case imemo_svar:
818 if (can_be_svar) {
819 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
820 }
821 default:
822#if VM_CHECK_MODE > 0
823 rb_bug("check_method_entry: svar should not be there:");
824#endif
825 return NULL;
826 }
827}
828
829static inline rb_cref_t *
830vm_env_cref(const VALUE *ep)
831{
832 rb_cref_t *cref;
833
834 while (!VM_ENV_LOCAL_P(ep)) {
835 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
836 ep = VM_ENV_PREV_EP(ep);
837 }
838
839 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
840}
841
842static int
843is_cref(const VALUE v, int can_be_svar)
844{
845 if (RB_TYPE_P(v, T_IMEMO)) {
846 switch (imemo_type(v)) {
847 case imemo_cref:
848 return TRUE;
849 case imemo_svar:
850 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
851 default:
852 break;
853 }
854 }
855 return FALSE;
856}
857
858static int
859vm_env_cref_by_cref(const VALUE *ep)
860{
861 while (!VM_ENV_LOCAL_P(ep)) {
862 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
863 ep = VM_ENV_PREV_EP(ep);
864 }
865 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
866}
867
868static rb_cref_t *
869cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
870{
871 const VALUE v = *vptr;
872 rb_cref_t *cref, *new_cref;
873
874 if (RB_TYPE_P(v, T_IMEMO)) {
875 switch (imemo_type(v)) {
876 case imemo_cref:
877 cref = (rb_cref_t *)v;
878 new_cref = vm_cref_dup(cref);
879 if (parent) {
880 RB_OBJ_WRITE(parent, vptr, new_cref);
881 }
882 else {
883 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
884 }
885 return (rb_cref_t *)new_cref;
886 case imemo_svar:
887 if (can_be_svar) {
888 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
889 }
890 /* fall through */
891 case imemo_ment:
892 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
893 default:
894 break;
895 }
896 }
897 return NULL;
898}
899
900static rb_cref_t *
901vm_cref_replace_with_duplicated_cref(const VALUE *ep)
902{
903 if (vm_env_cref_by_cref(ep)) {
904 rb_cref_t *cref;
905 VALUE envval;
906
907 while (!VM_ENV_LOCAL_P(ep)) {
908 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
909 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
910 return cref;
911 }
912 ep = VM_ENV_PREV_EP(ep);
913 }
914 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
915 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
916 }
917 else {
918 rb_bug("vm_cref_dup: unreachable");
919 }
920}
921
922static rb_cref_t *
923vm_get_cref(const VALUE *ep)
924{
925 rb_cref_t *cref = vm_env_cref(ep);
926
927 if (cref != NULL) {
928 return cref;
929 }
930 else {
931 rb_bug("vm_get_cref: unreachable");
932 }
933}
934
935rb_cref_t *
936rb_vm_get_cref(const VALUE *ep)
937{
938 return vm_get_cref(ep);
939}
940
941static rb_cref_t *
942vm_ec_cref(const rb_execution_context_t *ec)
943{
944 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
945
946 if (cfp == NULL) {
947 return NULL;
948 }
949 return vm_get_cref(cfp->ep);
950}
951
952static const rb_cref_t *
953vm_get_const_key_cref(const VALUE *ep)
954{
955 const rb_cref_t *cref = vm_get_cref(ep);
956 const rb_cref_t *key_cref = cref;
957
958 while (cref) {
959 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
960 RCLASS_CLONED_P(CREF_CLASS(cref)) ) {
961 return key_cref;
962 }
963 cref = CREF_NEXT(cref);
964 }
965
966 /* does not include singleton class */
967 return NULL;
968}
969
970rb_cref_t *
971rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
972{
973 rb_cref_t *new_cref_head = NULL;
974 rb_cref_t *new_cref_tail = NULL;
975
976 #define ADD_NEW_CREF(new_cref) \
977 if (new_cref_tail) { \
978 RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
979 } else { \
980 new_cref_head = new_cref; \
981 } \
982 new_cref_tail = new_cref;
983
984 while (cref) {
985 rb_cref_t *new_cref;
986 if (CREF_CLASS(cref) == old_klass) {
987 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
988 ADD_NEW_CREF(new_cref);
989 return new_cref_head;
990 }
991 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
992 cref = CREF_NEXT(cref);
993 ADD_NEW_CREF(new_cref);
994 }
995
996 #undef ADD_NEW_CREF
997
998 // Could we just reuse the original cref?
999 return new_cref_head;
1000}
1001
1002static rb_cref_t *
1003vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1004{
1005 rb_cref_t *prev_cref = NULL;
1006
1007 if (ep) {
1008 prev_cref = vm_env_cref(ep);
1009 }
1010 else {
1011 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1012
1013 if (cfp) {
1014 prev_cref = vm_env_cref(cfp->ep);
1015 }
1016 }
1017
1018 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1019}
1020
1021static inline VALUE
1022vm_get_cbase(const VALUE *ep)
1023{
1024 const rb_cref_t *cref = vm_get_cref(ep);
1025
1026 return CREF_CLASS_FOR_DEFINITION(cref);
1027}
1028
1029static inline VALUE
1030vm_get_const_base(const VALUE *ep)
1031{
1032 const rb_cref_t *cref = vm_get_cref(ep);
1033
1034 while (cref) {
1035 if (!CREF_PUSHED_BY_EVAL(cref)) {
1036 return CREF_CLASS_FOR_DEFINITION(cref);
1037 }
1038 cref = CREF_NEXT(cref);
1039 }
1040
1041 return Qundef;
1042}
1043
1044static inline void
1045vm_check_if_namespace(VALUE klass)
1046{
1047 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1048 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1049 }
1050}
1051
1052static inline void
1053vm_ensure_not_refinement_module(VALUE self)
1054{
1055 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1056 rb_warn("not defined at the refinement, but at the outer class/module");
1057 }
1058}
1059
1060static inline VALUE
1061vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1062{
1063 return klass;
1064}
1065
1066static inline VALUE
1067vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1068{
1069 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1070 VALUE val;
1071
1072 if (NIL_P(orig_klass) && allow_nil) {
1073 /* in current lexical scope */
1074 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1075 const rb_cref_t *cref;
1076 VALUE klass = Qnil;
1077
1078 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1079 root_cref = CREF_NEXT(root_cref);
1080 }
1081 cref = root_cref;
1082 while (cref && CREF_NEXT(cref)) {
1083 if (CREF_PUSHED_BY_EVAL(cref)) {
1084 klass = Qnil;
1085 }
1086 else {
1087 klass = CREF_CLASS(cref);
1088 }
1089 cref = CREF_NEXT(cref);
1090
1091 if (!NIL_P(klass)) {
1092 VALUE av, am = 0;
1093 rb_const_entry_t *ce;
1094 search_continue:
1095 if ((ce = rb_const_lookup(klass, id))) {
1096 rb_const_warn_if_deprecated(ce, klass, id);
1097 val = ce->value;
1098 if (UNDEF_P(val)) {
1099 if (am == klass) break;
1100 am = klass;
1101 if (is_defined) return 1;
1102 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1103 rb_autoload_load(klass, id);
1104 goto search_continue;
1105 }
1106 else {
1107 if (is_defined) {
1108 return 1;
1109 }
1110 else {
1111 if (UNLIKELY(!rb_ractor_main_p())) {
1112 if (!rb_ractor_shareable_p(val)) {
1113 rb_raise(rb_eRactorIsolationError,
1114 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1115 }
1116 }
1117 return val;
1118 }
1119 }
1120 }
1121 }
1122 }
1123
1124 /* search self */
1125 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1126 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1127 }
1128 else {
1129 klass = CLASS_OF(ec->cfp->self);
1130 }
1131
1132 if (is_defined) {
1133 return rb_const_defined(klass, id);
1134 }
1135 else {
1136 return rb_const_get(klass, id);
1137 }
1138 }
1139 else {
1140 vm_check_if_namespace(orig_klass);
1141 if (is_defined) {
1142 return rb_public_const_defined_from(orig_klass, id);
1143 }
1144 else {
1145 return rb_public_const_get_from(orig_klass, id);
1146 }
1147 }
1148}
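/* Lexical scan above, illustrated (orig_klass == Qnil with allow_nil):
 *
 *   module M
 *     X = 1
 *     class C
 *       def self.x; X; end  # cref scan: C, then M (X found in M);
 *     end                   # otherwise falls back to rb_const_get on C
 *   end
 */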
1149
1150VALUE
1151rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1152{
1153 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1154}
1155
1156static inline VALUE
1157vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1158{
1159 VALUE val = Qnil;
1160 int idx = 0;
1161 int allow_nil = TRUE;
1162 if (segments[0] == idNULL) {
1163 val = rb_cObject;
1164 idx++;
1165 allow_nil = FALSE;
1166 }
1167 while (segments[idx]) {
1168 ID id = segments[idx++];
1169 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1170 allow_nil = FALSE;
1171 }
1172 return val;
1173}
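/* Segment walking above serves paths such as A::B::C; a leading idNULL
 * segment denotes an explicit root, so ::A starts the walk at rb_cObject
 * with allow_nil turned off. */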
1174
1175
1176static inline VALUE
1177vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1178{
1179 VALUE klass;
1180
1181 if (!cref) {
1182 rb_bug("vm_get_cvar_base: no cref");
1183 }
1184
1185 while (CREF_NEXT(cref) &&
1186 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1187 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1188 cref = CREF_NEXT(cref);
1189 }
1190 if (top_level_raise && !CREF_NEXT(cref)) {
1191 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1192 }
1193
1194 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1195
1196 if (NIL_P(klass)) {
1197 rb_raise(rb_eTypeError, "no class variables available");
1198 }
1199 return klass;
1200}
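/* Cvar base selection: singleton/eval crefs are skipped so that, e.g.,
 * @@x inside `class << obj` or a string eval resolves against the
 * enclosing class, and a bare toplevel access raises RuntimeError. */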
1201
1202ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1203static inline void
1204fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1205{
1206 if (is_attr) {
1207 vm_cc_attr_index_set(cc, index, shape_id);
1208 }
1209 else {
1210 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1211 }
1212}
1213
1214#define ractor_incidental_shareable_p(cond, val) \
1215 (!(cond) || rb_ractor_shareable_p(val))
1216#define ractor_object_incidental_shareable_p(obj, val) \
1217 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1218
1219ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1220static inline VALUE
1221vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1222{
1223 VALUE fields_obj;
1224#if OPT_IC_FOR_IVAR
1225 VALUE val = Qundef;
1226 VALUE *ivar_list;
1227
1228 if (SPECIAL_CONST_P(obj)) {
1229 return default_value;
1230 }
1231
1232 shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(obj);
1233
1234 switch (BUILTIN_TYPE(obj)) {
1235 case T_OBJECT:
1236 ivar_list = ROBJECT_FIELDS(obj);
1237 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1238 break;
1239 case T_CLASS:
1240 case T_MODULE:
1241 {
1242 if (UNLIKELY(!rb_ractor_main_p())) {
1243 // For two reasons we can only use the fast path on the main
1244 // ractor.
1245 // First, only the main ractor is allowed to set ivars on classes
1246 // and modules. So we can skip locking.
1247 // Second, other ractors need to check the shareability of the
1248 // values returned from the class ivars.
1249
1250 if (default_value == Qundef) { // defined?
1251 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1252 }
1253 else {
1254 goto general_path;
1255 }
1256 }
1257
1258 fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
1259 if (!fields_obj) {
1260 return default_value;
1261 }
1262 ivar_list = rb_imemo_fields_ptr(fields_obj);
1263 shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
1264
1265 break;
1266 }
1267 default:
1268 if (rb_obj_exivar_p(obj)) {
1269 VALUE fields_obj = 0;
1270 if (!rb_gen_fields_tbl_get(obj, id, &fields_obj)) {
1271 return default_value;
1272 }
1273 ivar_list = rb_imemo_fields_ptr(fields_obj);
1274 }
1275 else {
1276 return default_value;
1277 }
1278 }
1279
1280 shape_id_t cached_id;
1281 attr_index_t index;
1282
1283 if (is_attr) {
1284 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1285 }
1286 else {
1287 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1288 }
1289
1290 if (LIKELY(cached_id == shape_id)) {
1291 RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));
1292
1293 if (index == ATTR_INDEX_NOT_SET) {
1294 return default_value;
1295 }
1296
1297 val = ivar_list[index];
1298#if USE_DEBUG_COUNTER
1299 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1300
1301 if (RB_TYPE_P(obj, T_OBJECT)) {
1302 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1303 }
1304#endif
1305 RUBY_ASSERT(!UNDEF_P(val));
1306 }
1307 else { // cache miss case
1308#if USE_DEBUG_COUNTER
1309 if (is_attr) {
1310 if (cached_id != INVALID_SHAPE_ID) {
1311 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1312 }
1313 else {
1314 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1315 }
1316 }
1317 else {
1318 if (cached_id != INVALID_SHAPE_ID) {
1319 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1320 }
1321 else {
1322 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1323 }
1324 }
1325 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1326
1327 if (RB_TYPE_P(obj, T_OBJECT)) {
1328 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1329 }
1330#endif
1331
1332 if (rb_shape_too_complex_p(shape_id)) {
1333 st_table *table = NULL;
1334 switch (BUILTIN_TYPE(obj)) {
1335 case T_CLASS:
1336 case T_MODULE:
1337 table = rb_imemo_fields_complex_tbl(fields_obj);
1338 break;
1339
1340 case T_OBJECT:
1341 table = ROBJECT_FIELDS_HASH(obj);
1342 break;
1343
1344 default: {
1345 VALUE fields_obj;
1346 if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
1347 table = rb_imemo_fields_complex_tbl(fields_obj);
1348 }
1349 break;
1350 }
1351 }
1352
1353 if (!table || !st_lookup(table, id, &val)) {
1354 val = default_value;
1355 }
1356 }
1357 else {
1358 shape_id_t previous_cached_id = cached_id;
1359 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1360 // Refill the inline cache with the freshly resolved
1361 // (shape, index) pair when the cached hint was stale.
1362 if (cached_id != previous_cached_id) {
1363 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1364 }
1365
1366 if (index == ATTR_INDEX_NOT_SET) {
1367 val = default_value;
1368 }
1369 else {
1370 // We fetched the ivar list above
1371 val = ivar_list[index];
1372 RUBY_ASSERT(!UNDEF_P(val));
1373 }
1374 }
1375 else {
1376 if (is_attr) {
1377 vm_cc_attr_index_initialize(cc, shape_id);
1378 }
1379 else {
1380 vm_ic_attr_index_initialize(ic, shape_id);
1381 }
1382
1383 val = default_value;
1384 }
1385 }
1386
1387 }
1388
1389 if (!UNDEF_P(default_value)) {
1390 RUBY_ASSERT(!UNDEF_P(val));
1391 }
1392
1393 RB_GC_GUARD(fields_obj);
1394 return val;
1395
1396general_path:
1397#endif /* OPT_IC_FOR_IVAR */
1398 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1399
1400 if (is_attr) {
1401 return rb_attr_get(obj, id);
1402 }
1403 else {
1404 return rb_ivar_get(obj, id);
1405 }
1406}
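/* Inline-cache protocol in brief: the IC/CC caches a (shape_id, index)
 * pair. A hit means the receiver still has the cached shape, so the ivar
 * is read straight from ivar_list[index]; a miss re-resolves through the
 * shape tree (or the too-complex st_table) and refills the cache. */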
1407
1408static void
1409populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1410{
1411 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1412
1413 // Cache population code
1414 if (is_attr) {
1415 vm_cc_attr_index_set(cc, index, next_shape_id);
1416 }
1417 else {
1418 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1419 }
1420}
1421
1422ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1423NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1424NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1425
1426static VALUE
1427vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1428{
1429#if OPT_IC_FOR_IVAR
1430 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1431
1432 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1433 rb_check_frozen(obj);
1434
1435 attr_index_t index = rb_obj_ivar_set(obj, id, val);
1436
1437 shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);
1438
1439 if (!rb_shape_too_complex_p(next_shape_id)) {
1440 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1441 }
1442
1443 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1444 return val;
1445 }
1446#endif
1447 return rb_ivar_set(obj, id, val);
1448}
1449
1450static VALUE
1451vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1452{
1453 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1454}
1455
1456static VALUE
1457vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1458{
1459 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1460}
1461
1462NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1463static VALUE
1464vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1465{
1466 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1467
1468 VALUE fields_obj = 0;
1469
1470 // Cache hit case
1471 if (shape_id == dest_shape_id) {
1472 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1473 }
1474 else if (dest_shape_id != INVALID_SHAPE_ID) {
1475 if (shape_id == RSHAPE_PARENT(dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1476 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1477 }
1478 else {
1479 return Qundef;
1480 }
1481 }
1482 else {
1483 return Qundef;
1484 }
1485
1486 rb_gen_fields_tbl_get(obj, 0, &fields_obj);
1487
1488 if (shape_id != dest_shape_id) {
1489 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1490 }
1491
1492 RB_OBJ_WRITE(obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
1493
1494 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1495
1496 return val;
1497}
1498
1499static inline VALUE
1500vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1501{
1502#if OPT_IC_FOR_IVAR
1503 switch (BUILTIN_TYPE(obj)) {
1504 case T_OBJECT:
1505 {
1506 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1507
1508 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1509 RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));
1510
1511 if (LIKELY(shape_id == dest_shape_id)) {
1512 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1513 VM_ASSERT(!rb_ractor_shareable_p(obj));
1514 }
1515 else if (dest_shape_id != INVALID_SHAPE_ID) {
1516 shape_id_t source_shape_id = RSHAPE_PARENT(dest_shape_id);
1517
1518 if (shape_id == source_shape_id && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1519 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1520
1521 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1522
1523 RUBY_ASSERT(rb_shape_get_next_iv_shape(source_shape_id, id) == dest_shape_id);
1524 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1525 }
1526 else {
1527 break;
1528 }
1529 }
1530 else {
1531 break;
1532 }
1533
1534 VALUE *ptr = ROBJECT_FIELDS(obj);
1535
1536 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1537 RB_OBJ_WRITE(obj, &ptr[index], val);
1538
1539 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1540 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1541 return val;
1542 }
1543 break;
1544 case T_CLASS:
1545 case T_MODULE:
1546 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1547 default:
1548 break;
1549 }
1550
1551 return Qundef;
1552#endif /* OPT_IC_FOR_IVAR */
1553}
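/* Qundef here means "cache miss or unsupported receiver": callers are
 * expected to retry with vm_setivar_default() (generic fields table) and
 * finally vm_setivar_slowpath(), which also repopulates the cache. */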
1554
1555static VALUE
1556update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1557{
1558 VALUE defined_class = 0;
1559 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1560
1561 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1562 defined_class = RBASIC(defined_class)->klass;
1563 }
1564
1565 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1566 if (!rb_cvc_tbl) {
1567 rb_bug("the cvc table should be set");
1568 }
1569
1570 VALUE ent_data;
1571 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1572 rb_bug("should have cvar cache entry");
1573 }
1574
1575 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1576
1577 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1578 ent->cref = cref;
1579 ic->entry = ent;
1580
1581 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1582 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1583 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1584 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1585
1586 return cvar_value;
1587}
1588
1589static inline VALUE
1590vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1591{
1592 const rb_cref_t *cref;
1593 cref = vm_get_cref(GET_EP());
1594
1595 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1596 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1597
1598 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1599 RUBY_ASSERT(!UNDEF_P(v));
1600
1601 return v;
1602 }
1603
1604 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1605
1606 return update_classvariable_cache(iseq, klass, id, cref, ic);
1607}
1608
1609VALUE
1610rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1611{
1612 return vm_getclassvariable(iseq, cfp, id, ic);
1613}
1614
1615static inline void
1616vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1617{
1618 const rb_cref_t *cref;
1619 cref = vm_get_cref(GET_EP());
1620
1621 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1622 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1623
1624 rb_class_ivar_set(ic->entry->class_value, id, val);
1625 return;
1626 }
1627
1628 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1629
1630 rb_cvar_set(klass, id, val);
1631
1632 update_classvariable_cache(iseq, klass, id, cref, ic);
1633}
1634
1635void
1636rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1637{
1638 vm_setclassvariable(iseq, cfp, id, val, ic);
1639}
1640
1641static inline VALUE
1642vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1643{
1644 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1645}
1646
1647static inline void
1648vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1649{
1650 if (RB_SPECIAL_CONST_P(obj)) {
1651 rb_error_frozen_object(obj);
1652 return;
1653 }
1654
1655 shape_id_t dest_shape_id;
1656 attr_index_t index;
1657 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1658
1659 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1660 switch (BUILTIN_TYPE(obj)) {
1661 case T_OBJECT:
1662 case T_CLASS:
1663 case T_MODULE:
1664 break;
1665 default:
1666 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1667 return;
1668 }
1669 }
1670 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1671 }
1672}
1673
1674void
1675rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1676{
1677 vm_setinstancevariable(iseq, obj, id, val, ic);
1678}
1679
1680static VALUE
1681vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1682{
1683 /* continue throw */
1684
1685 if (FIXNUM_P(err)) {
1686 ec->tag->state = RUBY_TAG_FATAL;
1687 }
1688 else if (SYMBOL_P(err)) {
1689 ec->tag->state = TAG_THROW;
1690 }
1691 else if (THROW_DATA_P(err)) {
1692 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1693 }
1694 else {
1695 ec->tag->state = TAG_RAISE;
1696 }
1697 return err;
1698}
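/* State mapping above: a re-thrown Fixnum marks a fatal tag, a Symbol is
 * a Kernel#throw in flight, THROW_DATA carries its own state, and
 * anything else is treated as a raise. */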
1699
1700static VALUE
1701vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1702 const int flag, const VALUE throwobj)
1703{
1704 const rb_control_frame_t *escape_cfp = NULL;
1705 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1706
1707 if (flag != 0) {
1708 /* do nothing */
1709 }
1710 else if (state == TAG_BREAK) {
1711 int is_orphan = 1;
1712 const VALUE *ep = GET_EP();
1713 const rb_iseq_t *base_iseq = GET_ISEQ();
1714 escape_cfp = reg_cfp;
1715
1716 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1717 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1718 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1719 ep = escape_cfp->ep;
1720 base_iseq = escape_cfp->iseq;
1721 }
1722 else {
1723 ep = VM_ENV_PREV_EP(ep);
1724 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1725 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1726 VM_ASSERT(escape_cfp->iseq == base_iseq);
1727 }
1728 }
1729
1730 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1731 /* lambda{... break ...} */
1732 is_orphan = 0;
1733 state = TAG_RETURN;
1734 }
1735 else {
1736 ep = VM_ENV_PREV_EP(ep);
1737
1738 while (escape_cfp < eocfp) {
1739 if (escape_cfp->ep == ep) {
1740 const rb_iseq_t *const iseq = escape_cfp->iseq;
1741 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1742 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1743 unsigned int i;
1744
1745 if (!ct) break;
1746 for (i=0; i < ct->size; i++) {
1747 const struct iseq_catch_table_entry *const entry =
1748 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1749
1750 if (entry->type == CATCH_TYPE_BREAK &&
1751 entry->iseq == base_iseq &&
1752 entry->start < epc && entry->end >= epc) {
1753 if (entry->cont == epc) { /* found! */
1754 is_orphan = 0;
1755 }
1756 break;
1757 }
1758 }
1759 break;
1760 }
1761
1762 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1763 }
1764 }
1765
1766 if (is_orphan) {
1767 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1768 }
1769 }
1770 else if (state == TAG_RETRY) {
1771 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1772
1773 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1774 }
1775 else if (state == TAG_RETURN) {
1776 const VALUE *current_ep = GET_EP();
1777 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1778 int in_class_frame = 0;
1779 int toplevel = 1;
1780 escape_cfp = reg_cfp;
1781
1782 // find target_lep, target_ep
1783 while (!VM_ENV_LOCAL_P(ep)) {
1784 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1785 target_ep = ep;
1786 }
1787 ep = VM_ENV_PREV_EP(ep);
1788 }
1789 target_lep = ep;
1790
1791 while (escape_cfp < eocfp) {
1792 const VALUE *lep = VM_CF_LEP(escape_cfp);
1793
1794 if (!target_lep) {
1795 target_lep = lep;
1796 }
1797
1798 if (lep == target_lep &&
1799 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1800 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1801 in_class_frame = 1;
1802 target_lep = 0;
1803 }
1804
1805 if (lep == target_lep) {
1806 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1807 toplevel = 0;
1808 if (in_class_frame) {
1809 /* lambda {class A; ... return ...; end} */
1810 goto valid_return;
1811 }
1812 else {
1813 const VALUE *tep = current_ep;
1814
1815 while (target_lep != tep) {
1816 if (escape_cfp->ep == tep) {
1817 /* in lambda */
1818 if (tep == target_ep) {
1819 goto valid_return;
1820 }
1821 else {
1822 goto unexpected_return;
1823 }
1824 }
1825 tep = VM_ENV_PREV_EP(tep);
1826 }
1827 }
1828 }
1829 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1830 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1831 case ISEQ_TYPE_TOP:
1832 case ISEQ_TYPE_MAIN:
1833 if (toplevel) {
1834 if (in_class_frame) goto unexpected_return;
1835 if (target_ep == NULL) {
1836 goto valid_return;
1837 }
1838 else {
1839 goto unexpected_return;
1840 }
1841 }
1842 break;
1843 case ISEQ_TYPE_EVAL: {
1844 const rb_iseq_t *is = escape_cfp->iseq;
1845 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1846 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1847 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1848 t = ISEQ_BODY(is)->type;
1849 }
1850 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1851 break;
1852 }
1853 case ISEQ_TYPE_CLASS:
1854 toplevel = 0;
1855 break;
1856 default:
1857 break;
1858 }
1859 }
1860 }
1861
1862 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1863 if (target_ep == NULL) {
1864 goto valid_return;
1865 }
1866 else {
1867 goto unexpected_return;
1868 }
1869 }
1870
1871 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1872 }
1873 unexpected_return:;
1874 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1875
1876 valid_return:;
1877 /* do nothing */
1878 }
1879 else {
1880 rb_bug("isns(throw): unsupported throw type");
1881 }
1882
1883 ec->tag->state = state;
1884 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1885}
1886
1887static VALUE
1888vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1889 rb_num_t throw_state, VALUE throwobj)
1890{
1891 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1892 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1893
1894 if (state != 0) {
1895 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1896 }
1897 else {
1898 return vm_throw_continue(ec, throwobj);
1899 }
1900}
1901
1902VALUE
1903rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1904{
1905 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1906}
1907
1908static inline void
1909vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1910{
1911 int is_splat = flag & 0x01;
1912 const VALUE *ptr;
1913 rb_num_t len;
1914 const VALUE obj = ary;
1915
1916 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1917 ary = obj;
1918 ptr = &ary;
1919 len = 1;
1920 }
1921 else {
1922 ptr = RARRAY_CONST_PTR(ary);
1923 len = (rb_num_t)RARRAY_LEN(ary);
1924 }
1925
1926 if (num + is_splat == 0) {
1927 /* no space left on stack */
1928 }
1929 else if (flag & 0x02) {
1930 /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1931 rb_num_t i = 0, j;
1932
1933 if (len < num) {
1934 for (i = 0; i < num - len; i++) {
1935 *cfp->sp++ = Qnil;
1936 }
1937 }
1938
1939 for (j = 0; i < num; i++, j++) {
1940 VALUE v = ptr[len - j - 1];
1941 *cfp->sp++ = v;
1942 }
1943
1944 if (is_splat) {
1945 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1946 }
1947 }
1948 else {
1949 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1950 if (is_splat) {
1951 if (num > len) {
1952 *cfp->sp++ = rb_ary_new();
1953 }
1954 else {
1955 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1956 }
1957 }
1958
1959 if (num > len) {
1960 rb_num_t i = 0;
1961 for (; i < num - len; i++) {
1962 *cfp->sp++ = Qnil;
1963 }
1964
1965 for (rb_num_t j = 0; i < num; i++, j++) {
1966 *cfp->sp++ = ptr[len - j - 1];
1967 }
1968 }
1969 else {
1970 for (rb_num_t j = 0; j < num; j++) {
1971 *cfp->sp++ = ptr[num - j - 1];
1972 }
1973 }
1974 }
1975
1976 RB_GC_GUARD(ary);
1977}
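/* Flag bits: 0x01 = splat, 0x02 = postarg. Roughly, at the Ruby level:
 *   a, b  = ary   # expandarray 2, 0
 *   a, *b = ary   # expandarray 1, 1 (rest collected after the heads)
 *   *a, b = ary   # expandarray 1, 3 (post values pushed in reverse)
 */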
1978
1979static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1980
1981static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1982
1983static struct rb_class_cc_entries *
1984vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
1985{
1986 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1987#if VM_CHECK_MODE > 0
1988 ccs->debug_sig = ~(VALUE)ccs;
1989#endif
1990 ccs->capa = 0;
1991 ccs->len = 0;
1992 ccs->cme = cme;
1993 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
1994 ccs->entries = NULL;
1995
1996 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1997 RB_OBJ_WRITTEN(klass, Qundef, cme);
1998 return ccs;
1999}
2000
2001static void
2002vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2003{
2004 if (! vm_cc_markable(cc)) {
2005 return;
2006 }
2007
2008 if (UNLIKELY(ccs->len == ccs->capa)) {
2009 if (ccs->capa == 0) {
2010 ccs->capa = 1;
2011 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2012 }
2013 else {
2014 ccs->capa *= 2;
2015 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2016 }
2017 }
2018 VM_ASSERT(ccs->len < ccs->capa);
2019
2020 const int pos = ccs->len++;
2021 ccs->entries[pos].argc = vm_ci_argc(ci);
2022 ccs->entries[pos].flag = vm_ci_flag(ci);
2023 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2024
2025 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2026 // for tuning
2027 // vm_mtbl_dump(klass, 0);
2028 }
2029}
2030
2031#if VM_CHECK_MODE > 0
2032void
2033rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2034{
2035 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2036 for (int i=0; i<ccs->len; i++) {
2037 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2038 ccs->entries[i].flag,
2039 ccs->entries[i].argc);
2040 rp(ccs->entries[i].cc);
2041 }
2042}
2043
2044static int
2045vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2046{
2047 VM_ASSERT(vm_ccs_p(ccs));
2048 VM_ASSERT(ccs->len <= ccs->capa);
2049
2050 for (int i=0; i<ccs->len; i++) {
2051 const struct rb_callcache *cc = ccs->entries[i].cc;
2052
2053 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2054 VM_ASSERT(vm_cc_class_check(cc, klass));
2055 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2056 VM_ASSERT(!vm_cc_super_p(cc));
2057 VM_ASSERT(!vm_cc_refinement_p(cc));
2058 }
2059 return TRUE;
2060}
2061#endif
2062
2063const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2064
2065static const struct rb_callcache *
2066vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2067{
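    // Lookup order: the per-class cc table (cc_tbl) maps mid -> ccs, and each
    // ccs entry is matched by the (argc, flag) pair of the call info. On a
    // miss, resolve the cme and cache a freshly created cc in the ccs.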
2068 const ID mid = vm_ci_mid(ci);
2069 struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2070 struct rb_class_cc_entries *ccs = NULL;
2071 VALUE ccs_data;
2072
2073 if (cc_tbl) {
2074 // CCS data is keyed on method id, so we don't need the method id
2075 // for doing comparisons in the `for` loop below.
2076 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2077 ccs = (struct rb_class_cc_entries *)ccs_data;
2078 const int ccs_len = ccs->len;
2079
2080 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2081 rb_vm_ccs_free(ccs);
2082 rb_id_table_delete(cc_tbl, mid);
2083 ccs = NULL;
2084 }
2085 else {
2086 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2087
2088 // We already know the method id is correct because we had
2089 // to look up the ccs_data by method id. All we need to
2090 // compare is argc and flag
2091 unsigned int argc = vm_ci_argc(ci);
2092 unsigned int flag = vm_ci_flag(ci);
2093
2094 for (int i=0; i<ccs_len; i++) {
2095 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2096 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2097 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2098
2099 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2100
2101 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2102 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2103
2104 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2105 VM_ASSERT(ccs_cc->klass == klass);
2106 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2107
2108 return ccs_cc;
2109 }
2110 }
2111 }
2112 }
2113 }
2114 else {
2115 cc_tbl = rb_id_table_create(2);
2116 RCLASS_WRITE_CC_TBL(klass, cc_tbl);
2117 }
2118
2119 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2120
2121 const rb_callable_method_entry_t *cme;
2122
2123 if (ccs) {
2124 cme = ccs->cme;
2125 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2126
2127 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2128 }
2129 else {
2130 cme = rb_callable_method_entry(klass, mid);
2131 }
2132
2133 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2134
2135 if (cme == NULL) {
2136 // undef or not found: can't cache the information
2137 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2138 return &vm_empty_cc;
2139 }
2140
2141 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2142
2143 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2144
2145 if (ccs == NULL) {
2146 VM_ASSERT(cc_tbl != NULL);
2147
2148 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2149 // rb_callable_method_entry() prepares ccs.
2150 ccs = (struct rb_class_cc_entries *)ccs_data;
2151 }
2152 else {
2153 // TODO: required?
2154 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2155 }
2156 }
2157
2158 cme = rb_check_overloaded_cme(cme, ci);
2159
2160 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2161 vm_ccs_push(klass, ccs, ci, cc);
2162
2163 VM_ASSERT(vm_cc_cme(cc) != NULL);
2164 VM_ASSERT(cme->called_id == mid);
2165 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2166
2167 return cc;
2168}
2169
2170const struct rb_callcache *
2171rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2172{
2173 const struct rb_callcache *cc;
2174
2175 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2176
2177 RB_VM_LOCKING() {
2178 cc = vm_search_cc(klass, ci);
2179
2180 VM_ASSERT(cc);
2181 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2182 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2183 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2184 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2185 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2186 }
2187
2188 return cc;
2189}
2190
2191static const struct rb_callcache *
2192vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2193{
2194#if USE_DEBUG_COUNTER
2195 const struct rb_callcache *old_cc = cd->cc;
2196#endif
2197
2198 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2199
2200#if OPT_INLINE_METHOD_CACHE
2201 cd->cc = cc;
2202
2203 const struct rb_callcache *empty_cc = &vm_empty_cc;
2204 if (cd_owner && cc != empty_cc) {
2205 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2206 }
2207
2208#if USE_DEBUG_COUNTER
2209 if (!old_cc || old_cc == empty_cc) {
2210 // empty
2211 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2212 }
2213 else if (old_cc == cc) {
2214 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2215 }
2216 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2217 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2218 }
2219 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2220 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2221 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2222 }
2223 else {
2224 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2225 }
2226#endif
2227#endif // OPT_INLINE_METHOD_CACHE
2228
2229 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2230 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2231
2232 return cc;
2233}
2234
2235ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2236static const struct rb_callcache *
2237vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2238{
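    // Inline cache hit requires that the cached cc matches the receiver's
    // class and that its cme has not been invalidated; anything else falls
    // through to the slow path.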
2239 const struct rb_callcache *cc = cd->cc;
2240
2241#if OPT_INLINE_METHOD_CACHE
2242 if (LIKELY(vm_cc_class_check(cc, klass))) {
2243 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2244 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2245 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2246 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2247 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2248 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2249
2250 return cc;
2251 }
2252 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2253 }
2254 else {
2255 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2256 }
2257#endif
2258
2259 return vm_search_method_slowpath0(cd_owner, cd, klass);
2260}
2261
2262static const struct rb_callcache *
2263vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2264{
2265 VALUE klass = CLASS_OF(recv);
2266 VM_ASSERT(klass != Qfalse);
2267 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2268
2269 return vm_search_method_fastpath(cd_owner, cd, klass);
2270}
2271
2272#if __has_attribute(transparent_union)
2273typedef union {
2274 VALUE (*anyargs)(ANYARGS);
2275 VALUE (*f00)(VALUE);
2276 VALUE (*f01)(VALUE, VALUE);
2277 VALUE (*f02)(VALUE, VALUE, VALUE);
2278 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2279 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2280 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2281 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2282 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2283 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2284 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2285 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2286 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2287 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2288 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2289 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2290 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2291 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2292} __attribute__((__transparent_union__)) cfunc_type;
2293# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2294#else
2295typedef VALUE (*cfunc_type)(ANYARGS);
2296# define make_cfunc_type(f) (cfunc_type)(f)
2297#endif
2298
2299static inline int
2300check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2301{
2302 if (! me) {
2303 return false;
2304 }
2305 else {
2306 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2307 VM_ASSERT(callable_method_entry_p(me));
2308 VM_ASSERT(me->def);
2309 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2310 return false;
2311 }
2312 else {
2313#if __has_attribute(transparent_union)
2314 return me->def->body.cfunc.func == func.anyargs;
2315#else
2316 return me->def->body.cfunc.func == func;
2317#endif
2318 }
2319 }
2320}
2321
2322static inline int
2323check_method_basic_definition(const rb_callable_method_entry_t *me)
2324{
2325 return me && METHOD_ENTRY_BASIC(me);
2326}
2327
2328static inline int
2329vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2330{
2331 VM_ASSERT(iseq != NULL);
2332 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2333 return check_cfunc(vm_cc_cme(cc), func);
2334}
2335
2336#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2337#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
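// These macros shadow the functions of the same name so that plain function
// pointers at later call sites are wrapped into cfunc_type; inside a macro
// body, the macro's own name is not expanded again.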
2338
2339#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2340
2341static inline bool
2342FIXNUM_2_P(VALUE a, VALUE b)
2343{
2344 /* FIXNUM_P(a) && FIXNUM_P(b)
2345 * == ((a & 1) && (b & 1))
2346 * == a & b & 1 */
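    /* e.g. INT2FIX(1) == 0x03 and INT2FIX(2) == 0x05, so 0x03 & 0x05 & 1 == 1,
     * while a heap object pointer is aligned (bit 0 clear) and yields 0. */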
2347 SIGNED_VALUE x = a;
2348 SIGNED_VALUE y = b;
2349 SIGNED_VALUE z = x & y & 1;
2350 return z == 1;
2351}
2352
2353static inline bool
2354FLONUM_2_P(VALUE a, VALUE b)
2355{
2356#if USE_FLONUM
2357 /* FLONUM_P(a) && FLONUM_P(b)
2358 * == ((a & 3) == 2) && ((b & 3) == 2)
2359 * == !(((a ^ 2) | (b ^ 2)) & 3)
2360 */
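    /* e.g. a flonum always has (v & 3) == 2; XOR with 2 clears that tag,
     * so the masked OR is 0 only when both operands carry the flonum tag. */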
2361 SIGNED_VALUE x = a;
2362 SIGNED_VALUE y = b;
2363 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2364 return !z;
2365#else
2366 return false;
2367#endif
2368}
2369
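// Fast path for `==`: compares immediates and a few common classes directly,
// as long as BOP_EQ is unredefined for the class involved. Returns Qundef to
// make the caller fall back to a real method dispatch.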
2370static VALUE
2371opt_equality_specialized(VALUE recv, VALUE obj)
2372{
2373 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2374 goto compare_by_identity;
2375 }
2376 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2377 goto compare_by_identity;
2378 }
2379 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2380 goto compare_by_identity;
2381 }
2382 else if (SPECIAL_CONST_P(recv)) {
2383 //
2384 }
2385 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2386 double a = RFLOAT_VALUE(recv);
2387 double b = RFLOAT_VALUE(obj);
2388
2389#if MSC_VERSION_BEFORE(1300)
2390 if (isnan(a)) {
2391 return Qfalse;
2392 }
2393 else if (isnan(b)) {
2394 return Qfalse;
2395 }
2396 else
2397#endif
2398 return RBOOL(a == b);
2399 }
2400 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2401 if (recv == obj) {
2402 return Qtrue;
2403 }
2404 else if (RB_TYPE_P(obj, T_STRING)) {
2405 return rb_str_eql_internal(obj, recv);
2406 }
2407 }
2408 return Qundef;
2409
2410 compare_by_identity:
2411 return RBOOL(recv == obj);
2412}
2413
2414static VALUE
2415opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2416{
2417 VM_ASSERT(cd_owner != NULL);
2418
2419 VALUE val = opt_equality_specialized(recv, obj);
2420 if (!UNDEF_P(val)) return val;
2421
2422 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2423 return Qundef;
2424 }
2425 else {
2426 return RBOOL(recv == obj);
2427 }
2428}
2429
2430#undef EQ_UNREDEFINED_P
2431
2432static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2433NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2434
2435static VALUE
2436opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2437{
2438 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2439
2440 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2441 return RBOOL(recv == obj);
2442 }
2443 else {
2444 return Qundef;
2445 }
2446}
2447
2448static VALUE
2449opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2450{
2451 VALUE val = opt_equality_specialized(recv, obj);
2452 if (!UNDEF_P(val)) {
2453 return val;
2454 }
2455 else {
2456 return opt_equality_by_mid_slowpath(recv, obj, mid);
2457 }
2458}
2459
2460VALUE
2461rb_equal_opt(VALUE obj1, VALUE obj2)
2462{
2463 return opt_equality_by_mid(obj1, obj2, idEq);
2464}
2465
2466VALUE
2467rb_eql_opt(VALUE obj1, VALUE obj2)
2468{
2469 return opt_equality_by_mid(obj1, obj2, idEqlP);
2470}
2471
2472extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2473extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2474
2475static VALUE
2476check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2477{
2478 switch (type) {
2479 case VM_CHECKMATCH_TYPE_WHEN:
2480 return pattern;
2481 case VM_CHECKMATCH_TYPE_RESCUE:
2482 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2483 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2484 }
2485 /* fall through */
2486 case VM_CHECKMATCH_TYPE_CASE: {
2487 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2488 }
2489 default:
2490 rb_bug("check_match: unreachable");
2491 }
2492}
2493
2494
2495#if MSC_VERSION_BEFORE(1300)
2496#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2497#else
2498#define CHECK_CMP_NAN(a, b) /* do nothing */
2499#endif
2500
2501static inline VALUE
2502double_cmp_lt(double a, double b)
2503{
2504 CHECK_CMP_NAN(a, b);
2505 return RBOOL(a < b);
2506}
2507
2508static inline VALUE
2509double_cmp_le(double a, double b)
2510{
2511 CHECK_CMP_NAN(a, b);
2512 return RBOOL(a <= b);
2513}
2514
2515static inline VALUE
2516double_cmp_gt(double a, double b)
2517{
2518 CHECK_CMP_NAN(a, b);
2519 return RBOOL(a > b);
2520}
2521
2522static inline VALUE
2523double_cmp_ge(double a, double b)
2524{
2525 CHECK_CMP_NAN(a, b);
2526 return RBOOL(a >= b);
2527}
2528
2529// Copied by vm_dump.c
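// Recomputes the frame's base pointer: the previous frame's SP plus this
// frame's local table and the VM_ENV_DATA_SIZE env slots pushed by vm_push_frame.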
2530static inline VALUE *
2531vm_base_ptr(const rb_control_frame_t *cfp)
2532{
2533 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2534
2535 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2536 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2537
2538 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2539 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2540 int params = ISEQ_BODY(cfp->iseq)->param.size;
2541
2542 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2543 bp += vm_ci_argc(ci);
2544 }
2545
2546 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2547 /* adjust `self' */
2548 bp += 1;
2549 }
2550#if VM_DEBUG_BP_CHECK
2551 if (bp != cfp->bp_check) {
2552 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2553 (long)(cfp->bp_check - GET_EC()->vm_stack),
2554 (long)(bp - GET_EC()->vm_stack));
2555 rb_bug("vm_base_ptr: unreachable");
2556 }
2557#endif
2558 return bp;
2559 }
2560 else {
2561 return NULL;
2562 }
2563}
2564
2565VALUE *
2566rb_vm_base_ptr(const rb_control_frame_t *cfp)
2567{
2568 return vm_base_ptr(cfp);
2569}
2570
2571/* method call processes with call_info */
2572
2573#include "vm_args.c"
2574
2575static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2576ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2577static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2578static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2579static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2580static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2581static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2582
2583static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2584
2585static VALUE
2586vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2587{
2588 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2589
2590 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2591}
2592
2593static VALUE
2594vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2595{
2596 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2597
2598 const struct rb_callcache *cc = calling->cc;
2599 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2600 int param = ISEQ_BODY(iseq)->param.size;
2601 int local = ISEQ_BODY(iseq)->local_table_size;
2602 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2603}
2604
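// An iseq is "simple" when it takes only required positional parameters:
// no optionals, rest, post, keywords, kwrest, `**nil`, block, or `...`.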
2605bool
2606rb_simple_iseq_p(const rb_iseq_t *iseq)
2607{
2608 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2609 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2610 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2611 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2612 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2613 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2614 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2615 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2616}
2617
2618bool
2619rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2620{
2621 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2622 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2623 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2624 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2625 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2626 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2627 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2628 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2629}
2630
2631bool
2632rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2633{
2634 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2635 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2636 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2637 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2638 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2639 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2640 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2641}
2642
2643#define ALLOW_HEAP_ARGV (-2)
2644#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2645
2646static inline bool
2647vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2648{
2649 vm_check_canary(GET_EC(), cfp->sp);
2650 bool ret = false;
2651
2652 if (!NIL_P(ary)) {
2653 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2654 long len = RARRAY_LEN(ary);
2655 int argc = calling->argc;
2656
2657 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2658 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2659 * a temporary array, instead of trying to keep the arguments on the VM stack.
2660 */
2661 VALUE *argv = cfp->sp - argc;
2662 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2663 rb_ary_cat(argv_ary, argv, argc);
2664 rb_ary_cat(argv_ary, ptr, len);
2665 cfp->sp -= argc - 1;
2666 cfp->sp[-1] = argv_ary;
2667 calling->argc = 1;
2668 calling->heap_argv = argv_ary;
2669 RB_GC_GUARD(ary);
2670 }
2671 else {
2672 long i;
2673
2674 if (max_args >= 0 && len + argc > max_args) {
2675 /* If only a given max_args is allowed, copy up to max args.
2676 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2677 * where additional arguments are ignored.
2678 *
2679 * Also, copy up to one more argument than the maximum,
2680 * in case it is an empty keyword hash that will be removed.
2681 */
2682 calling->argc += len - (max_args - argc + 1);
2683 len = max_args - argc + 1;
2684 ret = true;
2685 }
2686 else {
2687 /* Unset heap_argv if it was set originally. This can happen when
2688 * forwarding modified arguments: heap_argv was used for the
2689 * original call, but the forwarded method does not support
2690 * heap_argv in all cases.
2691 */
2692 calling->heap_argv = 0;
2693 }
2694 CHECK_VM_STACK_OVERFLOW(cfp, len);
2695
2696 for (i = 0; i < len; i++) {
2697 *cfp->sp++ = ptr[i];
2698 }
2699 calling->argc += i;
2700 }
2701 }
2702
2703 return ret;
2704}
2705
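/* Collapse the kw_len keyword values at the stack top into one Hash.
 * e.g. for f(k1: 1, k2: 2), the stack [..., 1, 2] becomes [..., {k1=>1, k2=>2}],
 * so argc shrinks by kw_len - 1 and the call continues as a keyword splat. */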
2706static inline void
2707vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2708{
2709 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2710 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2711 const VALUE h = rb_hash_new_with_size(kw_len);
2712 VALUE *sp = cfp->sp;
2713 int i;
2714
2715 for (i=0; i<kw_len; i++) {
2716 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2717 }
2718 (sp-kw_len)[0] = h;
2719
2720 cfp->sp -= kw_len - 1;
2721 calling->argc -= kw_len - 1;
2722 calling->kw_splat = 1;
2723}
2724
2725static inline VALUE
2726vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2727{
2728 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2729 if (keyword_hash != Qnil) {
2730 /* Convert a non-hash keyword splat to a new hash */
2731 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2732 }
2733 }
2734 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2735 /* Convert a hash keyword splat to a new hash unless
2736 * a mutable keyword splat was passed.
2737 * Skip allocating a new hash for an empty keyword splat, as an
2738 * empty keyword splat will be ignored by both callers.
2739 */
2740 keyword_hash = rb_hash_dup(keyword_hash);
2741 }
2742 return keyword_hash;
2743}
2744
2745static inline void
2746CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2747 struct rb_calling_info *restrict calling,
2748 const struct rb_callinfo *restrict ci, int max_args)
2749{
2750 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2751 if (IS_ARGS_KW_SPLAT(ci)) {
2752 // f(*a, **kw)
2753 VM_ASSERT(calling->kw_splat == 1);
2754
2755 cfp->sp -= 2;
2756 calling->argc -= 2;
2757 VALUE ary = cfp->sp[0];
2758 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2759
2760 // splat a
2761 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2762
2763 // put kw
2764 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2765 if (UNLIKELY(calling->heap_argv)) {
2766 rb_ary_push(calling->heap_argv, kwh);
2767 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2768 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2769 calling->kw_splat = 0;
2770 }
2771 }
2772 else {
2773 cfp->sp[0] = kwh;
2774 cfp->sp++;
2775 calling->argc++;
2776
2777 VM_ASSERT(calling->kw_splat == 1);
2778 }
2779 }
2780 else {
2781 calling->kw_splat = 0;
2782 }
2783 }
2784 else {
2785 // f(*a)
2786 VM_ASSERT(calling->kw_splat == 0);
2787
2788 cfp->sp -= 1;
2789 calling->argc -= 1;
2790 VALUE ary = cfp->sp[0];
2791
2792 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2793 goto check_keyword;
2794 }
2795
2796 // check the last argument
2797 VALUE last_hash, argv_ary;
2798 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2799 if (!IS_ARGS_KEYWORD(ci) &&
2800 RARRAY_LEN(argv_ary) > 0 &&
2801 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2802 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2803
2804 rb_ary_pop(argv_ary);
2805 if (!RHASH_EMPTY_P(last_hash)) {
2806 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2807 calling->kw_splat = 1;
2808 }
2809 }
2810 }
2811 else {
2812check_keyword:
2813 if (!IS_ARGS_KEYWORD(ci) &&
2814 calling->argc > 0 &&
2815 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2816 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2817
2818 if (RHASH_EMPTY_P(last_hash)) {
2819 calling->argc--;
2820 cfp->sp -= 1;
2821 }
2822 else {
2823 cfp->sp[-1] = rb_hash_dup(last_hash);
2824 calling->kw_splat = 1;
2825 }
2826 }
2827 }
2828 }
2829 }
2830 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2831 // f(**kw)
2832 VM_ASSERT(calling->kw_splat == 1);
2833 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2834
2835 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2836 cfp->sp--;
2837 calling->argc--;
2838 calling->kw_splat = 0;
2839 }
2840 else {
2841 cfp->sp[-1] = kwh;
2842 }
2843 }
2844 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2845 // f(k1:1, k2:2)
2846 VM_ASSERT(calling->kw_splat == 0);
2847
2848 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2849 * by creating a keyword hash.
2850 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2851 */
2852 vm_caller_setup_arg_kw(cfp, calling, ci);
2853 }
2854}
2855
2856#define USE_OPT_HIST 0
2857
2858#if USE_OPT_HIST
2859#define OPT_HIST_MAX 64
2860static int opt_hist[OPT_HIST_MAX+1];
2861
2862__attribute__((destructor))
2863static void
2864opt_hist_show_results_at_exit(void)
2865{
2866 for (int i=0; i<OPT_HIST_MAX; i++) {
2867 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2868 }
2869}
2870#endif
2871
2872static VALUE
2873vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2874 struct rb_calling_info *calling)
2875{
2876 const struct rb_callcache *cc = calling->cc;
2877 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2878 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2879 const int opt = calling->argc - lead_num;
2880 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2881 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2882 const int param = ISEQ_BODY(iseq)->param.size;
2883 const int local = ISEQ_BODY(iseq)->local_table_size;
2884 const int delta = opt_num - opt;
2885
2886 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2887
2888#if USE_OPT_HIST
2889 if (opt_pc < OPT_HIST_MAX) {
2890 opt_hist[opt]++;
2891 }
2892 else {
2893 opt_hist[OPT_HIST_MAX]++;
2894 }
2895#endif
2896
2897 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2898}
2899
2900static VALUE
2901vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2902 struct rb_calling_info *calling)
2903{
2904 const struct rb_callcache *cc = calling->cc;
2905 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2906 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2907 const int opt = calling->argc - lead_num;
2908 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2909
2910 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2911
2912#if USE_OPT_HIST
2913 if (opt_pc < OPT_HIST_MAX) {
2914 opt_hist[opt]++;
2915 }
2916 else {
2917 opt_hist[OPT_HIST_MAX]++;
2918 }
2919#endif
2920
2921 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2922}
2923
2924static void
2925args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
2926 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2927 VALUE *const locals);
2928
2929static VALUE
2930vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2931 struct rb_calling_info *calling)
2932{
2933 const struct rb_callcache *cc = calling->cc;
2934 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2935 int param_size = ISEQ_BODY(iseq)->param.size;
2936 int local_size = ISEQ_BODY(iseq)->local_table_size;
2937
2938 // Setting up local size and param size
2939 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2940
2941 local_size = local_size + vm_ci_argc(calling->cd->ci);
2942 param_size = param_size + vm_ci_argc(calling->cd->ci);
2943
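    // place the caller's CI just above the arguments; with the extended
    // param_size it becomes the `...` local of the callee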
2944 cfp->sp[0] = (VALUE)calling->cd->ci;
2945
2946 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2947}
2948
2949static VALUE
2950vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2951 struct rb_calling_info *calling)
2952{
2953 const struct rb_callinfo *ci = calling->cd->ci;
2954 const struct rb_callcache *cc = calling->cc;
2955
2956 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2957 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2958
2959 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2960 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2961 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2962 const int ci_kw_len = kw_arg->keyword_len;
2963 const VALUE * const ci_keywords = kw_arg->keywords;
2964 VALUE *argv = cfp->sp - calling->argc;
2965 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2966 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2967 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2968 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2969 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
2970
2971 int param = ISEQ_BODY(iseq)->param.size;
2972 int local = ISEQ_BODY(iseq)->local_table_size;
2973 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2974}
2975
2976static VALUE
2977vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2978 struct rb_calling_info *calling)
2979{
2980 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2981 const struct rb_callcache *cc = calling->cc;
2982
2983 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2984 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2985
2986 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2987 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2988 VALUE * const argv = cfp->sp - calling->argc;
2989 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
2990
2991 int i;
2992 for (i=0; i<kw_param->num; i++) {
2993 klocals[i] = kw_param->default_values[i];
2994 }
2995 klocals[i] = INT2FIX(0); // kw specify flag
2996 // NOTE:
2997 // nobody checks this value, but it should be cleared because it can
2998 // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
2999
3000 int param = ISEQ_BODY(iseq)->param.size;
3001 int local = ISEQ_BODY(iseq)->local_table_size;
3002 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3003}
3004
3005static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3006
3007static VALUE
3008vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3009 struct rb_calling_info *calling)
3010{
3011 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3012 cfp->sp -= (calling->argc + 1);
3013 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3014 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3015}
3016
3017VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3018
3019static void
3020warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3021{
3022 rb_vm_t *vm = GET_VM();
3023 set_table *dup_check_table = vm->unused_block_warning_table;
3024 st_data_t key;
3025 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3026
3027 union {
3028 VALUE v;
3029 unsigned char b[SIZEOF_VALUE];
3030 } k1 = {
3031 .v = (VALUE)pc,
3032 }, k2 = {
3033 .v = (VALUE)cme->def,
3034 };
3035
3036 // relaxed check
3037 if (!strict_unused_block) {
3038 key = (st_data_t)cme->def->original_id;
3039
3040 if (set_lookup(dup_check_table, key)) {
3041 return;
3042 }
3043 }
3044
3045 // strict check
3046 // make unique key from pc and me->def pointer
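    // (each key byte XORs a byte of the PC with a reverse-order byte of
    // cme->def, so both pointers contribute to every byte of the key)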
3047 key = 0;
3048 for (int i=0; i<SIZEOF_VALUE; i++) {
3049 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3050 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3051 }
3052
3053 if (0) {
3054 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3055 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3056 fprintf(stderr, "key:%p\n", (void *)key);
3057 }
3058
3059 // duplication check
3060 if (set_insert(dup_check_table, key)) {
3061 // already shown
3062 }
3063 else if (RTEST(ruby_verbose) || strict_unused_block) {
3064 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3065 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3066
3067 if (!NIL_P(m_loc)) {
3068 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3069 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3070 }
3071 else {
3072 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3073 }
3074 }
3075}
3076
3077static inline int
3078vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3079 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3080{
3081 const struct rb_callinfo *ci = calling->cd->ci;
3082 const struct rb_callcache *cc = calling->cc;
3083
3084 VM_ASSERT((vm_ci_argc(ci), 1));
3085 VM_ASSERT(vm_cc_cme(cc) != NULL);
3086
3087 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3088 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3089 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3090 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3091 }
3092
3093 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3094 if (LIKELY(rb_simple_iseq_p(iseq))) {
3095 rb_control_frame_t *cfp = ec->cfp;
3096 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3097 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3098
3099 if (calling->argc != lead_num) {
3100 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3101 }
3102
3103 //VM_ASSERT(ci == calling->cd->ci);
3104 VM_ASSERT(cc == calling->cc);
3105
3106 if (vm_call_iseq_optimizable_p(ci, cc)) {
3107 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3108 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3109 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3110 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3111 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3112 }
3113 else {
3114 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3115 }
3116 }
3117 return 0;
3118 }
3119 else if (rb_iseq_only_optparam_p(iseq)) {
3120 rb_control_frame_t *cfp = ec->cfp;
3121
3122 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3123 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3124
3125 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3126 const int argc = calling->argc;
3127 const int opt = argc - lead_num;
3128
3129 if (opt < 0 || opt > opt_num) {
3130 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3131 }
3132
3133 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3134 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3135 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3136 vm_call_cacheable(ci, cc));
3137 }
3138 else {
3139 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3140 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3141 vm_call_cacheable(ci, cc));
3142 }
3143
3144 /* initialize opt vars for self-references */
3145 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3146 for (int i=argc; i<lead_num + opt_num; i++) {
3147 argv[i] = Qnil;
3148 }
3149 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3150 }
3151 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3152 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3153 const int argc = calling->argc;
3154 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3155
3156 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3157 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3158
3159 if (argc - kw_arg->keyword_len == lead_num) {
3160 const int ci_kw_len = kw_arg->keyword_len;
3161 const VALUE * const ci_keywords = kw_arg->keywords;
3162 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3163 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3164
3165 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3166 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3167
3168 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3169 vm_call_cacheable(ci, cc));
3170
3171 return 0;
3172 }
3173 }
3174 else if (argc == lead_num) {
3175 /* no kwarg */
3176 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3177 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3178
3179 if (klocals[kw_param->num] == INT2FIX(0)) {
3180 /* copy from default_values */
3181 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3182 vm_call_cacheable(ci, cc));
3183 }
3184
3185 return 0;
3186 }
3187 }
3188 }
3189
3190 // The called iseq uses a `...` parameter
3191 // def foo(...) # <- iseq for foo will have "forwardable"
3192 //
3193 // We want to set the `...` local to the caller's CI
3194 // foo(1, 2) # <- the ci for this should end up as `...`
3195 //
3196 // So hopefully the stack looks like:
3197 //
3198 // => 1
3199 // => 2
3200 // => *
3201 // => **
3202 // => &
3203 // => ... # <- points at `foo`s CI
3204 // => cref_or_me
3205 // => specval
3206 // => type
3207 //
3208 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3209 bool can_fastpath = true;
3210
3211 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3212 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3213 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3214 ci = vm_ci_new_runtime(
3215 vm_ci_mid(ci),
3216 vm_ci_flag(ci),
3217 vm_ci_argc(ci),
3218 vm_ci_kwarg(ci));
3219 }
3220 else {
3221 ci = forward_cd->caller_ci;
3222 }
3223 can_fastpath = false;
3224 }
3225 // C functions calling iseqs will stack-allocate a CI,
3226 // so we need to convert it to a heap-allocated one.
3227 if (!vm_ci_markable(ci)) {
3228 ci = vm_ci_new_runtime(
3229 vm_ci_mid(ci),
3230 vm_ci_flag(ci),
3231 vm_ci_argc(ci),
3232 vm_ci_kwarg(ci));
3233 can_fastpath = false;
3234 }
3235 argv[param_size - 1] = (VALUE)ci;
3236 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3237 return 0;
3238 }
3239
3240 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3241}
3242
3243static void
3244vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3245{
3246 // This case is when the caller is using a ... parameter.
3247 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING set.
3248 // In this case the caller's caller's CI will be on the stack.
3249 //
3250 // For example:
3251 //
3252 // def bar(a, b); a + b; end
3253 // def foo(...); bar(...); end
3254 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3255 //
3256 // Stack layout will be:
3257 //
3258 // > 1
3259 // > 2
3260 // > CI for foo(1, 2)
3261 // > cref_or_me
3262 // > specval
3263 // > type
3264 // > receiver
3265 // > CI for foo(1, 2), via `getlocal ...`
3266 // > ( SP points here )
3267 const VALUE * lep = VM_CF_LEP(cfp);
3268
3269 const rb_iseq_t *iseq;
3270
3271 // If we're in an escaped environment (lambda for example), get the iseq
3272 // from the captured env.
3273 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3274 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3275 iseq = env->iseq;
3276 }
3277 else { // Otherwise use the lep to find the caller
3278 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3279 }
3280
3281 // Our local storage is below the args we need to copy
3282 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3283
3284 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3285 VALUE * to = cfp->sp - 1; // clobber the CI
3286
3287 if (RTEST(splat)) {
3288 to -= 1; // clobber the splat array
3289 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3290 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3291 to += RARRAY_LEN(splat);
3292 }
3293
3294 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3295 MEMCPY(to, from, VALUE, argc);
3296 cfp->sp = to + argc;
3297
3298 // Stack layout should now be:
3299 //
3300 // > 1
3301 // > 2
3302 // > CI for foo(1, 2)
3303 // > cref_or_me
3304 // > specval
3305 // > type
3306 // > receiver
3307 // > 1
3308 // > 2
3309 // > ( SP points here )
3310}
3311
3312static VALUE
3313vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3314{
3315 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3316
3317 const struct rb_callcache *cc = calling->cc;
3318 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3319 int param_size = ISEQ_BODY(iseq)->param.size;
3320 int local_size = ISEQ_BODY(iseq)->local_table_size;
3321
3322 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3323
3324 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3325 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3326}
3327
3328static VALUE
3329vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3330{
3331 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3332
3333 const struct rb_callcache *cc = calling->cc;
3334 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3335 int param_size = ISEQ_BODY(iseq)->param.size;
3336 int local_size = ISEQ_BODY(iseq)->local_table_size;
3337
3338 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3339
3340 // Setting up local size and param size
3341 local_size = local_size + vm_ci_argc(calling->cd->ci);
3342 param_size = param_size + vm_ci_argc(calling->cd->ci);
3343
3344 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3345 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3346}
3347
3348static inline VALUE
3349vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3350 int opt_pc, int param_size, int local_size)
3351{
3352 const struct rb_callinfo *ci = calling->cd->ci;
3353 const struct rb_callcache *cc = calling->cc;
3354
3355 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3356 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3357 }
3358 else {
3359 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3360 }
3361}
3362
3363static inline VALUE
3364vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3365 int opt_pc, int param_size, int local_size)
3366{
3367 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3368 VALUE *argv = cfp->sp - calling->argc;
3369 VALUE *sp = argv + param_size;
3370 cfp->sp = argv - 1 /* recv */;
3371
3372 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3373 calling->block_handler, (VALUE)me,
3374 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3375 local_size - param_size,
3376 ISEQ_BODY(iseq)->stack_max);
3377 return Qundef;
3378}
3379
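// Tail call: pop the current frame first, then push the callee's frame at the
// same stack position, re-pushing self and copying the arguments down.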
3380static inline VALUE
3381vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3382{
3383 const struct rb_callcache *cc = calling->cc;
3384 unsigned int i;
3385 VALUE *argv = cfp->sp - calling->argc;
3386 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3387 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3388 VALUE *src_argv = argv;
3389 VALUE *sp_orig, *sp;
3390 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3391
3392 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3393 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3394 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3395 dst_captured->code.val = src_captured->code.val;
3396 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3397 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3398 }
3399 else {
3400 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3401 }
3402 }
3403
3404 vm_pop_frame(ec, cfp, cfp->ep);
3405 cfp = ec->cfp;
3406
3407 sp_orig = sp = cfp->sp;
3408
3409 /* push self */
3410 sp[0] = calling->recv;
3411 sp++;
3412
3413 /* copy arguments */
3414 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3415 *sp++ = src_argv[i];
3416 }
3417
3418 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3419 calling->recv, calling->block_handler, (VALUE)me,
3420 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3421 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3422 ISEQ_BODY(iseq)->stack_max);
3423
3424 cfp->sp = sp_orig;
3425
3426 return Qundef;
3427}
3428
3429static void
3430ractor_unsafe_check(void)
3431{
3432 if (!rb_ractor_main_p()) {
3433 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3434 }
3435}
3436
3437static VALUE
3438call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3439{
3440 ractor_unsafe_check();
3441 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3442 return (*f)(recv, rb_ary_new4(argc, argv));
3443}
3444
3445static VALUE
3446call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3447{
3448 ractor_unsafe_check();
3449 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3450 return (*f)(argc, argv, recv);
3451}
3452
3453static VALUE
3454call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3455{
3456 ractor_unsafe_check();
3457 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3458 return (*f)(recv);
3459}
3460
3461static VALUE
3462call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3463{
3464 ractor_unsafe_check();
3465 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3466 return (*f)(recv, argv[0]);
3467}
3468
3469static VALUE
3470call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3471{
3472 ractor_unsafe_check();
3473 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3474 return (*f)(recv, argv[0], argv[1]);
3475}
3476
3477static VALUE
3478call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3479{
3480 ractor_unsafe_check();
3481 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3482 return (*f)(recv, argv[0], argv[1], argv[2]);
3483}
3484
3485static VALUE
3486call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3487{
3488 ractor_unsafe_check();
3489 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3490 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3491}
3492
3493static VALUE
3494call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3495{
3496 ractor_unsafe_check();
3497 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3498 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3499}
3500
3501static VALUE
3502call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3503{
3504 ractor_unsafe_check();
3505 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3506 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3507}
3508
3509static VALUE
3510call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3511{
3512 ractor_unsafe_check();
3513 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3514 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3515}
3516
3517static VALUE
3518call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3519{
3520 ractor_unsafe_check();
3521 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3522 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3523}
3524
3525static VALUE
3526call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3527{
3528 ractor_unsafe_check();
3529 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3530 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3531}
3532
3533static VALUE
3534call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3535{
3536 ractor_unsafe_check();
3537 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3538 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3539}
3540
3541static VALUE
3542call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3543{
3544 ractor_unsafe_check();
3545 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3546 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3547}
3548
3549static VALUE
3550call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3551{
3552 ractor_unsafe_check();
3553 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3554 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3555}
3556
3557static VALUE
3558call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3559{
3560 ractor_unsafe_check();
3561 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3562 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3563}
3564
3565static VALUE
3566call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3567{
3568 ractor_unsafe_check();
3569 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3570 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3571}
3572
3573static VALUE
3574call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3575{
3576 ractor_unsafe_check();
3577 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3578 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3579}
3580
3581static VALUE
3582ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3583{
3584 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3585 return (*f)(recv, rb_ary_new4(argc, argv));
3586}
3587
3588static VALUE
3589ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3590{
3591 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3592 return (*f)(argc, argv, recv);
3593}
3594
3595static VALUE
3596ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3597{
3598 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3599 return (*f)(recv);
3600}
3601
3602static VALUE
3603ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3604{
3605 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3606 return (*f)(recv, argv[0]);
3607}
3608
3609static VALUE
3610ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3611{
3612 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3613 return (*f)(recv, argv[0], argv[1]);
3614}
3615
3616static VALUE
3617ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3618{
3619 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3620 return (*f)(recv, argv[0], argv[1], argv[2]);
3621}
3622
3623static VALUE
3624ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3625{
3626 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3627 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3628}
3629
3630static VALUE
3631ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3632{
3633 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3634 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3635}
3636
3637static VALUE
3638ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3639{
3640 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3641 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3642}
3643
3644static VALUE
3645ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3646{
3647 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3648 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3649}
3650
3651static VALUE
3652ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3653{
3654 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3655 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3656}
3657
3658static VALUE
3659ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3660{
3661 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3662 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3663}
3664
3665static VALUE
3666ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3667{
3668 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3669 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3670}
3671
3672static VALUE
3673ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3674{
3675 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3676 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3677}
3678
3679static VALUE
3680ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3681{
3682 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3683 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3684}
3685
3686static VALUE
3687ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3688{
3689 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3690 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3691}
3692
3693static VALUE
3694ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3695{
3696 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3697 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3698}
3699
3700static VALUE
3701ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3702{
3703 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3704 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3705}
3706
3707static inline int
3708vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3709{
3710 const int ov_flags = RAISED_STACKOVERFLOW;
3711 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3712 if (rb_ec_raised_p(ec, ov_flags)) {
3713 rb_ec_raised_reset(ec, ov_flags);
3714 return TRUE;
3715 }
3716 return FALSE;
3717}
3718
3719#define CHECK_CFP_CONSISTENCY(func) \
3720 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3721 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3722
3723static inline
3724const rb_method_cfunc_t *
3725vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3726{
3727#if VM_DEBUG_VERIFY_METHOD_CACHE
3728 switch (me->def->type) {
3729 case VM_METHOD_TYPE_CFUNC:
3730 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3731 break;
3732# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3733 METHOD_BUG(ISEQ);
3734 METHOD_BUG(ATTRSET);
3735 METHOD_BUG(IVAR);
3736 METHOD_BUG(BMETHOD);
3737 METHOD_BUG(ZSUPER);
3738 METHOD_BUG(UNDEF);
3739 METHOD_BUG(OPTIMIZED);
3740 METHOD_BUG(MISSING);
3741 METHOD_BUG(REFINED);
3742 METHOD_BUG(ALIAS);
3743# undef METHOD_BUG
3744 default:
3745 rb_bug("wrong method type: %d", me->def->type);
3746 }
3747#endif
3748 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3749}
3750
3751static VALUE
3752vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3753 int argc, VALUE *argv, VALUE *stack_bottom)
3754{
3755 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3756 const struct rb_callinfo *ci = calling->cd->ci;
3757 const struct rb_callcache *cc = calling->cc;
3758 VALUE val;
3759 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3760 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3761
3762 VALUE recv = calling->recv;
3763 VALUE block_handler = calling->block_handler;
3764 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3765
3766 if (UNLIKELY(calling->kw_splat)) {
3767 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3768 }
3769
3770 VM_ASSERT(reg_cfp == ec->cfp);
3771
3772 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3773 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3774
3775 vm_push_frame(ec, NULL, frame_type, recv,
3776 block_handler, (VALUE)me,
3777 0, ec->cfp->sp, 0, 0);
3778
3779 int len = cfunc->argc;
3780 if (len >= 0) rb_check_arity(argc, len, len);
3781
3782 reg_cfp->sp = stack_bottom;
3783 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3784
3785 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3786
3787 rb_vm_pop_frame(ec);
3788
3789 VM_ASSERT(ec->cfp->sp == stack_bottom);
3790
3791 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3792 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3793
3794 return val;
3795}
3796
3797// Push a C method frame for a given cme. This is called when JIT code skipped
3798// pushing a frame but the C method reached a point where a frame is needed.
3799void
3800rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3801{
3802 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3803 rb_execution_context_t *ec = GET_EC();
3804 VALUE *sp = ec->cfp->sp;
3805 VALUE recv = *(sp - recv_idx - 1);
3806 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3807 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3808#if VM_CHECK_MODE > 0
3809 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3810 *(GET_EC()->cfp->sp) = Qfalse;
3811#endif
3812 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3813}
3814
3815// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3816bool
3817rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3818{
3819 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3820}
3821
3822static VALUE
3823vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3824{
3825 int argc = calling->argc;
3826 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3827 VALUE *argv = &stack_bottom[1];
3828
3829 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3830}
3831
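// Generic cfunc call: normalize splat/keyword arguments first (possibly into
// a hidden heap array); simple call sites get vm_call_cfunc_with_frame
// installed as their fastpath.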
3832static VALUE
3833vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3834{
3835 const struct rb_callinfo *ci = calling->cd->ci;
3836 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3837
3838 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3839 VALUE argv_ary;
3840 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3841 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3842 int argc = RARRAY_LENINT(argv_ary);
3843 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3844 VALUE *stack_bottom = reg_cfp->sp - 2;
3845
3846 VM_ASSERT(calling->argc == 1);
3847 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3848 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3849
3850 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3851 }
3852 else {
3853 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3854
3855 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3856 }
3857}
3858
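// Spread a splatted argument array back onto the VM stack so the cfunc
// receives ordinary positional arguments; falls back to the generic path when
// the element count exceeds VM_ARGC_STACK_MAX.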
3859static inline VALUE
3860vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3861{
3862 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3863 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3864
3865 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3866 return vm_call_cfunc_other(ec, reg_cfp, calling);
3867 }
3868
3869 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3870 calling->kw_splat = 0;
3871 int i;
3872 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3873 VALUE *sp = stack_bottom;
3874 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3875 for (i = 0; i < argc; i++) {
3876 *++sp = argv[i];
3877 }
3878 reg_cfp->sp = sp+1;
3879
3880 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3881}
3882
3883static inline VALUE
3884vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3885{
3886 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3887 VALUE argv_ary = reg_cfp->sp[-1];
3888 int argc = RARRAY_LENINT(argv_ary);
3889 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3890 VALUE last_hash;
3891 int argc_offset = 0;
3892
3893 if (UNLIKELY(argc > 0 &&
3894 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3895 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3896 if (!RHASH_EMPTY_P(last_hash)) {
3897 return vm_call_cfunc_other(ec, reg_cfp, calling);
3898 }
3899 argc_offset++;
3900 }
3901 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3902}
3903
3904static inline VALUE
3905vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3906{
3907 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3908 VALUE keyword_hash = reg_cfp->sp[-1];
3909
3910 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3911 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3912 }
3913
3914 return vm_call_cfunc_other(ec, reg_cfp, calling);
3915}
3916
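// Entry point for calling C-implemented methods: install a specialized
// fastpath for the `f(*a)` and `f(*a, **kw)` call shapes, otherwise use the
// generic path.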
3917static VALUE
3918vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3919{
3920 const struct rb_callinfo *ci = calling->cd->ci;
3921 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3922
3923 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3924 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3925 // f(*a)
3926 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3927 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3928 }
3929 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3930 // f(*a, **kw)
3931 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3932 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3933 }
3934 }
3935
3936 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3937 return vm_call_cfunc_other(ec, reg_cfp, calling);
3938}
3939
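// Fastpath for attr_reader-generated methods (`attr_reader :x`): pop the
// receiver and read the ivar through the cache embedded in the call cache.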
3940static VALUE
3941vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3942{
3943 const struct rb_callcache *cc = calling->cc;
3944 RB_DEBUG_COUNTER_INC(ccf_ivar);
3945 cfp->sp -= 1;
3946 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3947 return ivar;
3948}
3949
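// Fastpath for attr_writer-generated methods (`attr_writer :x`): pop value
// and receiver, then set the ivar via the shape/index pair cached in the call
// cache, falling back to generic or slow paths on a cache miss.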
3950static VALUE
3951vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3952{
3953 RB_DEBUG_COUNTER_INC(ccf_attrset);
3954 VALUE val = *(cfp->sp - 1);
3955 cfp->sp -= 2;
3956 attr_index_t index;
3957 shape_id_t dest_shape_id;
3958 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
3959 ID id = vm_cc_cme(cc)->def->body.attr.id;
3960 rb_check_frozen(obj);
3961 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3962 if (UNDEF_P(res)) {
3963 switch (BUILTIN_TYPE(obj)) {
3964 case T_OBJECT:
3965 case T_CLASS:
3966 case T_MODULE:
3967 break;
3968 default:
3969 {
3970 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3971 if (!UNDEF_P(res)) {
3972 return res;
3973 }
3974 }
3975 }
3976 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3977 }
3978 return res;
3979}
3980
3981static VALUE
3982vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3983{
3984 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3985}
3986
3987static inline VALUE
3988vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3989{
3990 rb_proc_t *proc;
3991 VALUE val;
3992 const struct rb_callcache *cc = calling->cc;
3993 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3994 VALUE procv = cme->def->body.bmethod.proc;
3995
3996 if (!RB_OBJ_SHAREABLE_P(procv) &&
3997 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
3998 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
3999 }
4000
4001 /* control block frame */
4002 GetProcPtr(procv, proc);
4003 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4004
4005 return val;
4006}
4007
4008static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4009
4010static VALUE
4011vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4012{
4013 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4014
4015 const struct rb_callcache *cc = calling->cc;
4016 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4017 VALUE procv = cme->def->body.bmethod.proc;
4018
4019 if (!RB_OBJ_SHAREABLE_P(procv) &&
4020 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4021 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4022 }
4023
4024 rb_proc_t *proc;
4025 GetProcPtr(procv, proc);
4026 const struct rb_block *block = &proc->block;
4027
4028 while (vm_block_type(block) == block_type_proc) {
4029 block = vm_proc_block(block->as.proc);
4030 }
4031 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4032
4033 const struct rb_captured_block *captured = &block->as.captured;
4034 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4035 VALUE * const argv = cfp->sp - calling->argc;
4036 const int arg_size = ISEQ_BODY(iseq)->param.size;
4037
4038 int opt_pc;
4039 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4040 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4041 }
4042 else {
4043 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4044 }
4045
4046 cfp->sp = argv - 1; // -1 for the receiver
4047
4048 vm_push_frame(ec, iseq,
4049 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4050 calling->recv,
4051 VM_GUARDED_PREV_EP(captured->ep),
4052 (VALUE)cme,
4053 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4054 argv + arg_size,
4055 ISEQ_BODY(iseq)->local_table_size - arg_size,
4056 ISEQ_BODY(iseq)->stack_max);
4057
4058 return Qundef;
4059}
4060
4061static VALUE
4062vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4063{
4064 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4065
4066 VALUE *argv;
4067 int argc;
4068 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4069 if (UNLIKELY(calling->heap_argv)) {
4070 argv = RARRAY_PTR(calling->heap_argv);
4071 cfp->sp -= 2;
4072 }
4073 else {
4074 argc = calling->argc;
4075 argv = ALLOCA_N(VALUE, argc);
4076 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4077 cfp->sp += - argc - 1;
4078 }
4079
4080 return vm_call_bmethod_body(ec, calling, argv);
4081}
4082
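// Methods created by define_method (BMETHOD), e.g. `define_method(:foo) { }`:
// unwrap the underlying proc and pick the iseq-backed or non-iseq path,
// caching the choice as the fastpath.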
4083static VALUE
4084vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4085{
4086 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4087
4088 const struct rb_callcache *cc = calling->cc;
4089 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4090 VALUE procv = cme->def->body.bmethod.proc;
4091 rb_proc_t *proc;
4092 GetProcPtr(procv, proc);
4093 const struct rb_block *block = &proc->block;
4094
4095 while (vm_block_type(block) == block_type_proc) {
4096 block = vm_proc_block(block->as.proc);
4097 }
4098 if (vm_block_type(block) == block_type_iseq) {
4099 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4100 return vm_call_iseq_bmethod(ec, cfp, calling);
4101 }
4102
4103 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4104 return vm_call_noniseq_bmethod(ec, cfp, calling);
4105}
4106
4107VALUE
4108rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4109{
4110 VALUE klass = current_class;
4111
4112 /* for a prepended Module, start the search from the cover class */
4113 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4114 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4115 klass = RBASIC_CLASS(klass);
4116 }
4117
4118 while (RTEST(klass)) {
4119 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4120 if (owner == target_owner) {
4121 return klass;
4122 }
4123 klass = RCLASS_SUPER(klass);
4124 }
4125
4126 return current_class; /* maybe module function */
4127}
4128
4129static const rb_callable_method_entry_t *
4130aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4131{
4132 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4133 const rb_callable_method_entry_t *cme;
4134
4135 if (orig_me->defined_class == 0) {
4136 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4137 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4138 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4139
4140 if (me->def->reference_count == 1) {
4141 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4142 }
4143 else {
4144 rb_method_definition_t *def =
4145 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4146 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4147 }
4148 }
4149 else {
4150 cme = (const rb_callable_method_entry_t *)orig_me;
4151 }
4152
4153 VM_ASSERT(callable_method_entry_p(cme));
4154 return cme;
4155}
4156
4157const rb_callable_method_entry_t *
4158rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4159{
4160 return aliased_callable_method_entry(me);
4161}
4162
4163static VALUE
4164vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4165{
4166 calling->cc = &VM_CC_ON_STACK(Qundef,
4167 vm_call_general,
4168 {{0}},
4169 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4170
4171 return vm_call_method_each_type(ec, cfp, calling);
4172}
4173
4174static enum method_missing_reason
4175ci_missing_reason(const struct rb_callinfo *ci)
4176{
4177 enum method_missing_reason stat = MISSING_NOENTRY;
4178 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4179 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4180 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4181 return stat;
4182}
4183
4184static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4185
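// Dispatch a call whose method name arrives as a Symbol/String (send,
// Symbol#to_proc, etc.). If the name is not an interned ID the call is turned
// into method_missing; when method_missing is unredefined, a NoMethodError is
// raised directly so that no symbol is created inadvertently ([Feature #5112]).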
4186static VALUE
4187vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4188 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4189{
4190 ASSUME(calling->argc >= 0);
4191
4192 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4193 int argc = calling->argc;
4194 VALUE recv = calling->recv;
4195 VALUE klass = CLASS_OF(recv);
4196 ID mid = rb_check_id(&symbol);
4197 flags |= VM_CALL_OPT_SEND;
4198
4199 if (UNLIKELY(! mid)) {
4200 mid = idMethodMissing;
4201 missing_reason = ci_missing_reason(ci);
4202 ec->method_missing_reason = missing_reason;
4203
4204 VALUE argv_ary;
4205 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4206 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4207 rb_ary_unshift(argv_ary, symbol);
4208
4209 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4210 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4211 VALUE exc = rb_make_no_method_exception(
4212 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4213
4214 rb_exc_raise(exc);
4215 }
4216 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4217 }
4218 else {
4219 /* E.g. when argc == 2
4220 *
4221 * | | | | TOPN
4222 * | | +------+
4223 * | | +---> | arg1 | 0
4224 * +------+ | +------+
4225 * | arg1 | -+ +-> | arg0 | 1
4226 * +------+ | +------+
4227 * | arg0 | ---+ | sym | 2
4228 * +------+ +------+
4229 * | recv | | recv | 3
4230 * --+------+--------+------+------
4231 */
4232 int i = argc;
4233 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4234 INC_SP(1);
4235 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4236 argc = ++calling->argc;
4237
4238 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4239 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4240 TOPN(i) = symbol;
4241 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4242 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4243 VALUE exc = rb_make_no_method_exception(
4244 rb_eNoMethodError, 0, recv, argc, argv, priv);
4245
4246 rb_exc_raise(exc);
4247 }
4248 else {
4249 TOPN(i) = rb_str_intern(symbol);
4250 }
4251 }
4252 }
4253
4254 struct rb_forwarding_call_data new_fcd = {
4255 .cd = {
4256 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4257 .cc = NULL,
4258 },
4259 .caller_ci = NULL,
4260 };
4261
4262 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4263 calling->cd = &new_fcd.cd;
4264 }
4265 else {
4266 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4267 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4268 new_fcd.caller_ci = caller_ci;
4269 calling->cd = (struct rb_call_data *)&new_fcd;
4270 }
4271 calling->cc = &VM_CC_ON_STACK(klass,
4272 vm_call_general,
4273 { .method_missing_reason = missing_reason },
4274 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4275
4276 if (flags & VM_CALL_FCALL) {
4277 return vm_call_method(ec, reg_cfp, calling);
4278 }
4279
4280 const struct rb_callcache *cc = calling->cc;
4281 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4282
4283 if (vm_cc_cme(cc) != NULL) {
4284 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4285 case METHOD_VISI_PUBLIC: /* likely */
4286 return vm_call_method_each_type(ec, reg_cfp, calling);
4287 case METHOD_VISI_PRIVATE:
4288 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4289 break;
4290 case METHOD_VISI_PROTECTED:
4291 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4292 break;
4293 default:
4294 VM_UNREACHABLE(vm_call_method);
4295 }
4296 return vm_call_method_missing(ec, reg_cfp, calling);
4297 }
4298
4299 return vm_call_method_nome(ec, reg_cfp, calling);
4300}
4301
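// Optimized send, e.g. `recv.send(:foo, 1, 2)`: remove the method-name
// symbol from the bottom of the argument list (see the diagram below) and
// re-dispatch through vm_call_symbol.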
4302static VALUE
4303vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4304{
4305 const struct rb_callinfo *ci = calling->cd->ci;
4306 int i;
4307 VALUE sym;
4308
4309 i = calling->argc - 1;
4310
4311 if (calling->argc == 0) {
4312 rb_raise(rb_eArgError, "no method name given");
4313 }
4314
4315 sym = TOPN(i);
4316 /* E.g. when i == 2
4317 *
4318 * | | | | TOPN
4319 * +------+ | |
4320 * | arg1 | ---+ | | 0
4321 * +------+ | +------+
4322 * | arg0 | -+ +-> | arg1 | 1
4323 * +------+ | +------+
4324 * | sym | +---> | arg0 | 2
4325 * +------+ +------+
4326 * | recv | | recv | 3
4327 * --+------+--------+------+------
4328 */
4329 /* shift arguments */
4330 if (i > 0) {
4331 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4332 }
4333 calling->argc -= 1;
4334 DEC_SP(1);
4335
4336 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4337}
4338
4339static VALUE
4340vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4341{
4342 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4343 const struct rb_callinfo *ci = calling->cd->ci;
4344 int flags = VM_CALL_FCALL;
4345 VALUE sym;
4346
4347 VALUE argv_ary;
4348 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4349 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4350 sym = rb_ary_shift(argv_ary);
4351 flags |= VM_CALL_ARGS_SPLAT;
4352 if (calling->kw_splat) {
4353 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4354 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4355 calling->kw_splat = 0;
4356 }
4357 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4358 }
4359
4360 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4361 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4362}
4363
4364static VALUE
4365vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4366{
4367 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4368 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4369}
4370
4371static VALUE
4372vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4373{
4374 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4375
4376 const struct rb_callinfo *ci = calling->cd->ci;
4377 int flags = vm_ci_flag(ci);
4378
4379 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4380 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4381 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4382 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4383 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4384 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4385 }
4386
4387 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4388 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4389}
4390
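// Rewrite the current call into recv.method_missing(:mid, ...): shift the
// arguments up one slot, prepend the method name symbol as the first
// argument, and re-dispatch to the receiver's method_missing entry.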
4391static VALUE
4392vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4393 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4394{
4395 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4396
4397 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4398 unsigned int argc, flag;
4399
4400 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4401 argc = ++calling->argc;
4402
4403 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4404 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4405 vm_check_canary(ec, reg_cfp->sp);
4406 if (argc > 1) {
4407 MEMMOVE(argv+1, argv, VALUE, argc-1);
4408 }
4409 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4410 INC_SP(1);
4411
4412 ec->method_missing_reason = reason;
4413
4414 struct rb_forwarding_call_data new_fcd = {
4415 .cd = {
4416 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4417 .cc = NULL,
4418 },
4419 .caller_ci = NULL,
4420 };
4421
4422 if (!(flag & VM_CALL_FORWARDING)) {
4423 calling->cd = &new_fcd.cd;
4424 }
4425 else {
4426 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4427 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4428 new_fcd.caller_ci = caller_ci;
4429 calling->cd = (struct rb_call_data *)&new_fcd;
4430 }
4431
4432 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4433 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4434 return vm_call_method(ec, reg_cfp, calling);
4435}
4436
4437static VALUE
4438vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4439{
4440 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4441}
4442
4443static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
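// ZSUPER method entries delegate to the same-named method in the superclass:
// look it up there and re-dispatch, unwrapping refined entries to their
// original method first.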
4444static VALUE
4445vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4446{
4447 klass = RCLASS_SUPER(klass);
4448
4449 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4450 if (cme == NULL) {
4451 return vm_call_method_nome(ec, cfp, calling);
4452 }
4453 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4454 cme->def->body.refined.orig_me) {
4455 cme = refined_method_callable_without_refinement(cme);
4456 }
4457
4458 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4459
4460 return vm_call_method_each_type(ec, cfp, calling);
4461}
4462
4463static inline VALUE
4464find_refinement(VALUE refinements, VALUE klass)
4465{
4466 if (NIL_P(refinements)) {
4467 return Qnil;
4468 }
4469 return rb_hash_lookup(refinements, klass);
4470}
4471
4472PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4473static rb_control_frame_t *
4474current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4475{
4476 rb_control_frame_t *top_cfp = cfp;
4477
4478 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4479 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4480
4481 do {
4482 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4483 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4484 /* TODO: orphan block */
4485 return top_cfp;
4486 }
4487 } while (cfp->iseq != local_iseq);
4488 }
4489 return cfp;
4490}
4491
4492static const rb_callable_method_entry_t *
4493refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4494{
4495 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4496 const rb_callable_method_entry_t *cme;
4497
4498 if (orig_me->defined_class == 0) {
4499 cme = NULL;
4501 }
4502 else {
4503 cme = (const rb_callable_method_entry_t *)orig_me;
4504 }
4505
4506 VM_ASSERT(callable_method_entry_p(cme));
4507
4508 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4509 cme = NULL;
4510 }
4511
4512 return cme;
4513}
4514
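// Search the lexical cref chain for an activated refinement of the callee's
// owner and prefer its implementation; on super calls, skip the refinement
// already being executed. Falls back to the original method or superclass.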
4515static const rb_callable_method_entry_t *
4516search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4517{
4518 ID mid = vm_ci_mid(calling->cd->ci);
4519 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4520 const struct rb_callcache * const cc = calling->cc;
4521 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4522
4523 for (; cref; cref = CREF_NEXT(cref)) {
4524 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4525 if (NIL_P(refinement)) continue;
4526
4527 const rb_callable_method_entry_t *const ref_me =
4528 rb_callable_method_entry(refinement, mid);
4529
4530 if (ref_me) {
4531 if (vm_cc_call(cc) == vm_call_super_method) {
4532 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4533 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4534 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4535 continue;
4536 }
4537 }
4538
4539 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4540 cme->def != ref_me->def) {
4541 cme = ref_me;
4542 }
4543 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4544 return cme;
4545 }
4546 }
4547 else {
4548 return NULL;
4549 }
4550 }
4551
4552 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4553 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4554 }
4555 else {
4556 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4557 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4558 return cme;
4559 }
4560}
4561
4562static VALUE
4563vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4564{
4565 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4566
4567 if (ref_cme) {
4568 if (calling->cd->cc) {
4569 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4570 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4571 return vm_call_method(ec, cfp, calling);
4572 }
4573 else {
4574 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4575 calling->cc = ref_cc;
4576 return vm_call_method(ec, cfp, calling);
4577 }
4578 }
4579 else {
4580 return vm_call_method_nome(ec, cfp, calling);
4581 }
4582}
4583
4584static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4585
4586NOINLINE(static VALUE
4587 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4588 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4589
4590static VALUE
4591vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4592 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4593{
4594 int argc = calling->argc;
4595
4596 /* remove self */
4597 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4598 DEC_SP(1);
4599
4600 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4601}
4602
4603static VALUE
4604vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4605{
4606 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4607
4608 const struct rb_callinfo *ci = calling->cd->ci;
4609 VALUE procval = calling->recv;
4610 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4611}
4612
4613static VALUE
4614vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4615{
4616 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4617
4618 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4619 const struct rb_callinfo *ci = calling->cd->ci;
4620
4621 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4622 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4623 }
4624 else {
4625 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4626 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4627 return vm_call_general(ec, reg_cfp, calling);
4628 }
4629}
4630
4631static VALUE
4632vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4633{
4634 VALUE recv = calling->recv;
4635
4636 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4637 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4638 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4639
4640 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4641 return internal_RSTRUCT_GET(recv, off);
4642}
4643
4644static VALUE
4645vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4646{
4647 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4648
4649 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4650 reg_cfp->sp -= 1;
4651 return ret;
4652}
4653
4654static VALUE
4655vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4656{
4657 VALUE recv = calling->recv;
4658
4659 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4660 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4661 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4662
4663 rb_check_frozen(recv);
4664
4665 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4666 internal_RSTRUCT_SET(recv, off, val);
4667
4668 return val;
4669}
4670
4671static VALUE
4672vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4673{
4674 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4675
4676 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4677 reg_cfp->sp -= 2;
4678 return ret;
4679}
4680
4681NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4682 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4683
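// Wrap an optimized attribute/struct accessor call: when c-call/c-return
// tracing is active, surround the call with the event hooks; otherwise run
// `nohook` (typically fastpath installation) and call directly.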
4684#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4685 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4686 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4687 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4688 var = func; \
4689 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4690 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4691 } \
4692 else { \
4693 nohook; \
4694 var = func; \
4695 }
4696
4697static VALUE
4698vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4699 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4700{
4701 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4702 case OPTIMIZED_METHOD_TYPE_SEND:
4703 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4704 return vm_call_opt_send(ec, cfp, calling);
4705 case OPTIMIZED_METHOD_TYPE_CALL:
4706 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4707 return vm_call_opt_call(ec, cfp, calling);
4708 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4709 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4710 return vm_call_opt_block_call(ec, cfp, calling);
4711 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4712 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4713 rb_check_arity(calling->argc, 0, 0);
4714
4715 VALUE v;
4716 VM_CALL_METHOD_ATTR(v,
4717 vm_call_opt_struct_aref(ec, cfp, calling),
4718 set_vm_cc_ivar(cc); \
4719 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4720 return v;
4721 }
4722 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4723 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4724 rb_check_arity(calling->argc, 1, 1);
4725
4726 VALUE v;
4727 VM_CALL_METHOD_ATTR(v,
4728 vm_call_opt_struct_aset(ec, cfp, calling),
4729 set_vm_cc_ivar(cc); \
4730 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4731 return v;
4732 }
4733 default:
4734 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4735 }
4736}
4737
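// Dispatch by method definition type (iseq, cfunc, attr_reader/attr_writer,
// bmethod, alias, optimized, refined, ...), installing the type-specific
// handler as the call-cache fastpath where that is safe.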
4738static VALUE
4739vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4740{
4741 const struct rb_callinfo *ci = calling->cd->ci;
4742 const struct rb_callcache *cc = calling->cc;
4743 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4744 VALUE v;
4745
4746 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4747
4748 switch (cme->def->type) {
4749 case VM_METHOD_TYPE_ISEQ:
4750 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4751 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4752 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4753 }
4754 else {
4755 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4756 return vm_call_iseq_setup(ec, cfp, calling);
4757 }
4758
4759 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4760 case VM_METHOD_TYPE_CFUNC:
4761 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4762 return vm_call_cfunc(ec, cfp, calling);
4763
4764 case VM_METHOD_TYPE_ATTRSET:
4765 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4766
4767 rb_check_arity(calling->argc, 1, 1);
4768
4769 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4770
4771 if (vm_cc_markable(cc)) {
4772 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4773 VM_CALL_METHOD_ATTR(v,
4774 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4775 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4776 }
4777 else {
4778 cc = &((struct rb_callcache) {
4779 .flags = T_IMEMO |
4780 (imemo_callcache << FL_USHIFT) |
4781 VM_CALLCACHE_UNMARKABLE |
4782 VM_CALLCACHE_ON_STACK,
4783 .klass = cc->klass,
4784 .cme_ = cc->cme_,
4785 .call_ = cc->call_,
4786 .aux_ = {
4787 .attr = {
4788 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4789 }
4790 },
4791 });
4792
4793 VM_CALL_METHOD_ATTR(v,
4794 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4795 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4796 }
4797 return v;
4798
4799 case VM_METHOD_TYPE_IVAR:
4800 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4801 rb_check_arity(calling->argc, 0, 0);
4802 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4803 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4804 VM_CALL_METHOD_ATTR(v,
4805 vm_call_ivar(ec, cfp, calling),
4806 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4807 return v;
4808
4809 case VM_METHOD_TYPE_MISSING:
4810 vm_cc_method_missing_reason_set(cc, 0);
4811 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4812 return vm_call_method_missing(ec, cfp, calling);
4813
4814 case VM_METHOD_TYPE_BMETHOD:
4815 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4816 return vm_call_bmethod(ec, cfp, calling);
4817
4818 case VM_METHOD_TYPE_ALIAS:
4819 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4820 return vm_call_alias(ec, cfp, calling);
4821
4822 case VM_METHOD_TYPE_OPTIMIZED:
4823 return vm_call_optimized(ec, cfp, calling, ci, cc);
4824
4825 case VM_METHOD_TYPE_UNDEF:
4826 break;
4827
4828 case VM_METHOD_TYPE_ZSUPER:
4829 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4830
4831 case VM_METHOD_TYPE_REFINED:
4832 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4833 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4834 return vm_call_refined(ec, cfp, calling);
4835 }
4836
4837 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4838}
4839
4840NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4841
4842static VALUE
4843vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4844{
4845 /* method missing */
4846 const struct rb_callinfo *ci = calling->cd->ci;
4847 const int stat = ci_missing_reason(ci);
4848
4849 if (vm_ci_mid(ci) == idMethodMissing) {
4850 if (UNLIKELY(calling->heap_argv)) {
4851 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4852 }
4853 else {
4854 rb_control_frame_t *reg_cfp = cfp;
4855 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4856 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4857 }
4858 }
4859 else {
4860 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4861 }
4862}
4863
4864/* Protected method calls and super invocations need to check that the receiver
4865 * (self for super) inherits the module on which the method is defined.
4866 * In the case of refinements, it should consider the original class, not the
4867 * refinement.
4868 */
4869static VALUE
4870vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4871{
4872 VALUE defined_class = me->defined_class;
4873 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4874 return NIL_P(refined_class) ? defined_class : refined_class;
4875}
4876
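// Main dispatch with visibility checks: public methods go straight through;
// private (without FCALL) and protected (failing the receiver check against
// the defined class) degrade to method_missing.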
4877static inline VALUE
4878vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4879{
4880 const struct rb_callinfo *ci = calling->cd->ci;
4881 const struct rb_callcache *cc = calling->cc;
4882
4883 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4884
4885 if (vm_cc_cme(cc) != NULL) {
4886 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4887 case METHOD_VISI_PUBLIC: /* likely */
4888 return vm_call_method_each_type(ec, cfp, calling);
4889
4890 case METHOD_VISI_PRIVATE:
4891 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4892 enum method_missing_reason stat = MISSING_PRIVATE;
4893 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4894
4895 vm_cc_method_missing_reason_set(cc, stat);
4896 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4897 return vm_call_method_missing(ec, cfp, calling);
4898 }
4899 return vm_call_method_each_type(ec, cfp, calling);
4900
4901 case METHOD_VISI_PROTECTED:
4902 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4903 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4904 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4905 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4906 return vm_call_method_missing(ec, cfp, calling);
4907 }
4908 else {
4909 /* caching method info to dummy cc */
4910 VM_ASSERT(vm_cc_cme(cc) != NULL);
4911 struct rb_callcache cc_on_stack = *cc;
4912 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4913 calling->cc = &cc_on_stack;
4914 return vm_call_method_each_type(ec, cfp, calling);
4915 }
4916 }
4917 return vm_call_method_each_type(ec, cfp, calling);
4918
4919 default:
4920 rb_bug("unreachable");
4921 }
4922 }
4923 else {
4924 return vm_call_method_nome(ec, cfp, calling);
4925 }
4926}
4927
4928static VALUE
4929vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4930{
4931 RB_DEBUG_COUNTER_INC(ccf_general);
4932 return vm_call_method(ec, reg_cfp, calling);
4933}
4934
4935void
4936rb_vm_cc_general(const struct rb_callcache *cc)
4937{
4938 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4939 VM_ASSERT(cc != vm_cc_empty());
4940
4941 *(vm_call_handler *)&cc->call_ = vm_call_general;
4942}
4943
4944static VALUE
4945vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4946{
4947 RB_DEBUG_COUNTER_INC(ccf_super_method);
4948
4949 // This line is introduced to make this function distinct from `vm_call_general`, because some
4950 // compilers (MSVC, in our experience) can merge identical functions so that their addresses become the same.
4951 // The address of `vm_call_super_method` is used in `search_refined_method`, so it must stay different.
4952 if (ec == NULL) rb_bug("unreachable");
4953
4954 /* this check is required to distinguish this function from the other call handlers. */
4955 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4956 return vm_call_method(ec, reg_cfp, calling);
4957}
4958
4959/* super */
4960
4961static inline VALUE
4962vm_search_normal_superclass(VALUE klass)
4963{
4964 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4965 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4966 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4967 klass = RBASIC(klass)->klass;
4968 }
4969 klass = RCLASS_ORIGIN(klass);
4970 return RCLASS_SUPER(klass);
4971}
4972
4973NORETURN(static void vm_super_outside(void));
4974
4975static void
4976vm_super_outside(void)
4977{
4978 rb_raise(rb_eNoMethodError, "super called outside of method");
4979}
4980
4981static const struct rb_callcache *
4982empty_cc_for_super(void)
4983{
4984 return &vm_empty_cc_for_super;
4985}
4986
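// Resolve the target of `super`: validate the frame and receiver, rewrite the
// call info to the current method's original id, then search upward from the
// defining class, invalidating the fastpath where the cached entry is unsafe.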
4987static const struct rb_callcache *
4988vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
4989{
4990 VALUE current_defined_class;
4991 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
4992
4993 if (!me) {
4994 vm_super_outside();
4995 }
4996
4997 current_defined_class = vm_defined_class_for_protected_call(me);
4998
4999 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5000 reg_cfp->iseq != method_entry_iseqptr(me) &&
5001 !rb_obj_is_kind_of(recv, current_defined_class)) {
5002 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5003 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5004
5005 if (m) { /* not bound UnboundMethod */
5006 rb_raise(rb_eTypeError,
5007 "self has wrong type to call super in this context: "
5008 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5009 rb_obj_class(recv), m);
5010 }
5011 }
5012
5013 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5014 rb_raise(rb_eRuntimeError,
5015 "implicit argument passing of super from method defined"
5016 " by define_method() is not supported."
5017 " Specify all arguments explicitly.");
5018 }
5019
5020 ID mid = me->def->original_id;
5021
5022 if (!vm_ci_markable(cd->ci)) {
5023 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5024 }
5025 else {
5026 // update iseq. really? (TODO)
5027 cd->ci = vm_ci_new_runtime(mid,
5028 vm_ci_flag(cd->ci),
5029 vm_ci_argc(cd->ci),
5030 vm_ci_kwarg(cd->ci));
5031
5032 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5033 }
5034
5035 const struct rb_callcache *cc;
5036
5037 VALUE klass = vm_search_normal_superclass(me->defined_class);
5038
5039 if (!klass) {
5040 /* bound instance method of module */
5041 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5042 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5043 }
5044 else {
5045 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5046 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5047
5048 // define_method can cache for different method id
5049 if (cached_cme == NULL) {
5050 // empty_cc_for_super is not a markable object
5051 cd->cc = empty_cc_for_super();
5052 }
5053 else if (cached_cme->called_id != mid) {
5054 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5055 if (cme) {
5056 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5057 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5058 }
5059 else {
5060 cd->cc = cc = empty_cc_for_super();
5061 }
5062 }
5063 else {
5064 switch (cached_cme->def->type) {
5065 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5066 case VM_METHOD_TYPE_REFINED:
5067 // cc->klass is a superclass of the receiver's class, so checking cc->klass alone is not enough to invalidate the IVC for the receiver's class.
5068 case VM_METHOD_TYPE_ATTRSET:
5069 case VM_METHOD_TYPE_IVAR:
5070 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5071 break;
5072 default:
5073 break; // use fastpath
5074 }
5075 }
5076 }
5077
5078 VM_ASSERT((vm_cc_cme(cc), true));
5079
5080 return cc;
5081}
5082
5083/* yield */
5084
5085static inline int
5086block_proc_is_lambda(const VALUE procval)
5087{
5088 rb_proc_t *proc;
5089
5090 if (procval) {
5091 GetProcPtr(procval, proc);
5092 return proc->is_lambda;
5093 }
5094 else {
5095 return 0;
5096 }
5097}
5098
5099static inline const rb_namespace_t *
5100block_proc_namespace(const VALUE procval)
5101{
5102 rb_proc_t *proc;
5103
5104 if (procval) {
5105 GetProcPtr(procval, proc);
5106 return proc->ns;
5107 }
5108 else {
5109 return NULL;
5110 }
5111}
5112
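// Yield to a C-implemented block (ifunc) under an IFUNC frame. With the
// current non-lambda semantics only the first value (or nil) is passed as
// `arg`; the full argv is still handed to the ifunc. `me` marks bmethod use.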
5113static VALUE
5114vm_yield_with_cfunc(rb_execution_context_t *ec,
5115 const struct rb_captured_block *captured,
5116 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5117 const rb_callable_method_entry_t *me)
5118{
5119 int is_lambda = FALSE; /* TODO */
5120 VALUE val, arg, blockarg;
5121 int frame_flag;
5122 const struct vm_ifunc *ifunc = captured->code.ifunc;
5123
5124 if (is_lambda) {
5125 arg = rb_ary_new4(argc, argv);
5126 }
5127 else if (argc == 0) {
5128 arg = Qnil;
5129 }
5130 else {
5131 arg = argv[0];
5132 }
5133
5134 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5135
5136 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5137 if (kw_splat) {
5138 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5139 }
5140
5141 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5142 frame_flag,
5143 self,
5144 VM_GUARDED_PREV_EP(captured->ep),
5145 (VALUE)me,
5146 0, ec->cfp->sp, 0, 0);
5147 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5148 rb_vm_pop_frame(ec);
5149
5150 return val;
5151}
5152
5153VALUE
5154rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5155{
5156 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5157}
5158
5159static VALUE
5160vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5161{
5162 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5163}
5164
5165static inline int
5166vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5167{
5168 int i;
5169 long len = RARRAY_LEN(ary);
5170
5171 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5172
5173 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5174 argv[i] = RARRAY_AREF(ary, i);
5175 }
5176
5177 return i;
5178}
5179
5180static inline VALUE
5181vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5182{
5183 VALUE ary, arg0 = argv[0];
5184 ary = rb_check_array_type(arg0);
5185#if 0
5186 argv[0] = arg0;
5187#else
5188 VM_ASSERT(argv[0] == arg0);
5189#endif
5190 return ary;
5191}
5192
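// Set up arguments for a simple-iseq block: a lone array argument may be
// splatted across lead parameters; blocks pad missing arguments with nil and
// truncate extras, while lambdas/methods raise an arity error.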
5193static int
5194vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5195{
5196 if (rb_simple_iseq_p(iseq)) {
5197 rb_control_frame_t *cfp = ec->cfp;
5198 VALUE arg0;
5199
5200 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5201
5202 if (arg_setup_type == arg_setup_block &&
5203 calling->argc == 1 &&
5204 ISEQ_BODY(iseq)->param.flags.has_lead &&
5205 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5206 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5207 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5208 }
5209
5210 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5211 if (arg_setup_type == arg_setup_block) {
5212 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5213 int i;
5214 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5215 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5216 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5217 }
5218 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5219 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5220 }
5221 }
5222 else {
5223 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5224 }
5225 }
5226
5227 return 0;
5228 }
5229 else {
5230 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5231 }
5232}
5233
5234static int
5235vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5236{
5237 struct rb_calling_info calling_entry, *calling;
5238
5239 calling = &calling_entry;
5240 calling->argc = argc;
5241 calling->block_handler = block_handler;
5242 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5243 calling->recv = Qundef;
5244 calling->heap_argv = 0;
5245 calling->cc = NULL;
5246 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5247
5248 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5249}
5250
5251/* ruby iseq -> ruby block */
5252
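// Invoke an iseq-backed block: bind the arguments in place on the VM stack
// and push a BLOCK frame (flagged LAMBDA when invoked as a lambda).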
5253static VALUE
5254vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5255 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5256 bool is_lambda, VALUE block_handler)
5257{
5258 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5259 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5260 const int arg_size = ISEQ_BODY(iseq)->param.size;
5261 VALUE * const rsp = GET_SP() - calling->argc;
5262 VALUE * const argv = rsp;
5263 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5264 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5265
5266 SET_SP(rsp);
5267
5268 if (calling->proc_ns) {
5269 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5270 }
5271
5272 vm_push_frame(ec, iseq,
5273 frame_flag,
5274 captured->self,
5275 VM_GUARDED_PREV_EP(captured->ep), 0,
5276 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5277 rsp + arg_size,
5278 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5279
5280 return Qundef;
5281}
5282
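// Invoke a Symbol block handler, e.g. from `[1, 2].map(&:to_s)`: the first
// yielded value becomes the receiver and the rest become arguments, delegated
// to vm_call_symbol.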
5283static VALUE
5284vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5285 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5286 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5287{
5288 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5289 int flags = vm_ci_flag(ci);
5290
5291 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5292 ((calling->argc == 0) ||
5293 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5294 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5295 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5296 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5297 flags = 0;
5298 if (UNLIKELY(calling->heap_argv)) {
5299#if VM_ARGC_STACK_MAX < 0
5300 if (RARRAY_LEN(calling->heap_argv) < 1) {
5301 rb_raise(rb_eArgError, "no receiver given");
5302 }
5303#endif
5304 calling->recv = rb_ary_shift(calling->heap_argv);
5305 // Modify stack to avoid cfp consistency error
5306 reg_cfp->sp++;
5307 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5308 reg_cfp->sp[-2] = calling->recv;
5309 flags |= VM_CALL_ARGS_SPLAT;
5310 }
5311 else {
5312 if (calling->argc < 1) {
5313 rb_raise(rb_eArgError, "no receiver given");
5314 }
5315 calling->recv = TOPN(--calling->argc);
5316 }
5317 if (calling->kw_splat) {
5318 flags |= VM_CALL_KW_SPLAT;
5319 }
5320 }
5321 else {
5322 if (calling->argc < 1) {
5323 rb_raise(rb_eArgError, "no receiver given");
5324 }
5325 calling->recv = TOPN(--calling->argc);
5326 }
5327
5328 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5329}
5330
5331static VALUE
5332vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5333 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5334 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5335{
5336 VALUE val;
5337 int argc;
5338 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5339 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5340 argc = calling->argc;
5341 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5342 POPN(argc); /* TODO: should put before C/yield? */
5343 return val;
5344}
5345
5346static VALUE
5347vm_proc_to_block_handler(VALUE procval)
5348{
5349 const struct rb_block *block = vm_proc_block(procval);
5350
5351 switch (vm_block_type(block)) {
5352 case block_type_iseq:
5353 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5354 case block_type_ifunc:
5355 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5356 case block_type_symbol:
5357 return VM_BH_FROM_SYMBOL(block->as.symbol);
5358 case block_type_proc:
5359 return VM_BH_FROM_PROC(block->as.proc);
5360 }
5361 VM_UNREACHABLE(vm_yield_with_proc);
5362 return Qundef;
5363}
5364
5365static VALUE
5366vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5367 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5368 bool is_lambda, VALUE block_handler)
5369{
5370 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5371 VALUE proc = VM_BH_TO_PROC(block_handler);
5372 if (!calling->proc_ns) {
5373 calling->proc_ns = block_proc_namespace(proc);
5374 }
5375 is_lambda = block_proc_is_lambda(proc);
5376 block_handler = vm_proc_to_block_handler(proc);
5377 }
5378
5379 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5380}
5381
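// Dispatch a block invocation to the handler that matches the block's
// representation: iseq block, C ifunc, wrapped proc, or symbol.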
5382static inline VALUE
5383vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5384 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5385 bool is_lambda, VALUE block_handler)
5386{
5387 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5388 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5389 bool is_lambda, VALUE block_handler);
5390
5391 switch (vm_block_handler_type(block_handler)) {
5392 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5393 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5394 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5395 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5396 default: rb_bug("vm_invoke_block: unreachable");
5397 }
5398
5399 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5400}
5401
5402static VALUE
5403vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5404{
5405 const rb_execution_context_t *ec = GET_EC();
5406 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5407 struct rb_captured_block *captured;
5408
5409 if (cfp == 0) {
5410 rb_bug("vm_make_proc_with_iseq: unreachable");
5411 }
5412
5413 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5414 captured->code.iseq = blockiseq;
5415
5416 return rb_vm_make_proc(ec, captured, rb_cProc);
5417}
5418
5419static VALUE
5420vm_once_exec(VALUE iseq)
5421{
5422 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5423 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5424}
5425
5426static VALUE
5427vm_once_clear(VALUE data)
5428{
5429 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5430 is->once.running_thread = NULL;
5431 return Qnil;
5432}
5433
5434/* defined insn */
5435
5436static bool
5437check_respond_to_missing(VALUE obj, VALUE v)
5438{
5439 VALUE args[2];
5440 VALUE r;
5441
5442 args[0] = obj; args[1] = Qfalse;
5443 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5444 if (!UNDEF_P(r) && RTEST(r)) {
5445 return true;
5446 }
5447 else {
5448 return false;
5449 }
5450}
5451
5452static bool
5453vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5454{
5455 VALUE klass;
5456 enum defined_type type = (enum defined_type)op_type;
5457
5458 switch (type) {
5459 case DEFINED_IVAR:
5460 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5461 break;
5462 case DEFINED_GVAR:
5463 return rb_gvar_defined(SYM2ID(obj));
5464 break;
5465 case DEFINED_CVAR: {
5466 const rb_cref_t *cref = vm_get_cref(GET_EP());
5467 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5468 return rb_cvar_defined(klass, SYM2ID(obj));
5469 break;
5470 }
5471 case DEFINED_CONST:
5472 case DEFINED_CONST_FROM: {
5473 bool allow_nil = type == DEFINED_CONST;
5474 klass = v;
5475 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5476 break;
5477 }
5478 case DEFINED_FUNC:
5479 klass = CLASS_OF(v);
5480 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5481 break;
5482 case DEFINED_METHOD:{
5483 VALUE klass = CLASS_OF(v);
5484 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5485
5486 if (me) {
5487 switch (METHOD_ENTRY_VISI(me)) {
5488 case METHOD_VISI_PRIVATE:
5489 break;
5490 case METHOD_VISI_PROTECTED:
5491 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5492 break;
5493 }
5494 case METHOD_VISI_PUBLIC:
5495 return true;
5496 break;
5497 default:
5498 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5499 }
5500 }
5501 else {
5502 return check_respond_to_missing(obj, v);
5503 }
5504 break;
5505 }
5506 case DEFINED_YIELD:
5507 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5508 return true;
5509 }
5510 break;
5511 case DEFINED_ZSUPER:
5512 {
5513 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5514
5515 if (me) {
5516 VALUE klass = vm_search_normal_superclass(me->defined_class);
5517 if (!klass) return false;
5518
5519 ID id = me->def->original_id;
5520
5521 return rb_method_boundp(klass, id, 0);
5522 }
5523 }
5524 break;
5525 case DEFINED_REF:
5526 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5527 default:
5528 rb_bug("unimplemented defined? type (VM)");
5529 break;
5530 }
5531
5532 return false;
5533}
5534
5535bool
5536rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5537{
5538 return vm_defined(ec, reg_cfp, op_type, obj, v);
5539}
5540
5541static const VALUE *
5542vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5543{
5544 rb_num_t i;
5545 const VALUE *ep = reg_ep;
5546 for (i = 0; i < lv; i++) {
5547 ep = GET_PREV_EP(ep);
5548 }
5549 return ep;
5550}
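/* For instance, a block reading a local defined two scopes out is compiled
 * with lv == 2, and this helper walks GET_PREV_EP() twice to reach the
 * environment that owns the variable. */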
5551
5552static VALUE
5553vm_get_special_object(const VALUE *const reg_ep,
5554 enum vm_special_object_type type)
5555{
5556 switch (type) {
5557 case VM_SPECIAL_OBJECT_VMCORE:
5558 return rb_mRubyVMFrozenCore;
5559 case VM_SPECIAL_OBJECT_CBASE:
5560 return vm_get_cbase(reg_ep);
5561 case VM_SPECIAL_OBJECT_CONST_BASE:
5562 return vm_get_const_base(reg_ep);
5563 default:
5564 rb_bug("putspecialobject insn: unknown value_type %d", type);
5565 }
5566}
5567
5568// The ZJIT implementation uses this C function,
5569// so it needs a non-static wrapper to call.
5570VALUE
5571rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5572{
5573 return vm_get_special_object(reg_ep, type);
5574}
5575
5576static VALUE
5577vm_concat_array(VALUE ary1, VALUE ary2st)
5578{
5579 const VALUE ary2 = ary2st;
5580 VALUE tmp1 = rb_check_to_array(ary1);
5581 VALUE tmp2 = rb_check_to_array(ary2);
5582
5583 if (NIL_P(tmp1)) {
5584 tmp1 = rb_ary_new3(1, ary1);
5585 }
5586 if (tmp1 == ary1) {
5587 tmp1 = rb_ary_dup(ary1);
5588 }
5589
5590 if (NIL_P(tmp2)) {
5591 return rb_ary_push(tmp1, ary2);
5592 }
5593 else {
5594 return rb_ary_concat(tmp1, tmp2);
5595 }
5596}
5597
5598static VALUE
5599vm_concat_to_array(VALUE ary1, VALUE ary2st)
5600{
5601 /* ary1 must be a newly created array */
5602 const VALUE ary2 = ary2st;
5603
5604 if (NIL_P(ary2)) return ary1;
5605
5606 VALUE tmp2 = rb_check_to_array(ary2);
5607
5608 if (NIL_P(tmp2)) {
5609 return rb_ary_push(ary1, ary2);
5610 }
5611 else {
5612 return rb_ary_concat(ary1, tmp2);
5613 }
5614}
5615
5616// The YJIT implementation uses this C function,
5617// so it needs a non-static wrapper to call.
5618VALUE
5619rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5620{
5621 return vm_concat_array(ary1, ary2st);
5622}
5623
5624VALUE
5625rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5626{
5627 return vm_concat_to_array(ary1, ary2st);
5628}
5629
5630static VALUE
5631vm_splat_array(VALUE flag, VALUE ary)
5632{
5633 if (NIL_P(ary)) {
5634 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5635 }
5636 VALUE tmp = rb_check_to_array(ary);
5637 if (NIL_P(tmp)) {
5638 return rb_ary_new3(1, ary);
5639 }
5640 else if (RTEST(flag)) {
5641 return rb_ary_dup(tmp);
5642 }
5643 else {
5644 return tmp;
5645 }
5646}
5647
5648// The YJIT implementation uses this C function,
5649// so it needs a non-static wrapper to call.
5650VALUE
5651rb_vm_splat_array(VALUE flag, VALUE ary)
5652{
5653 return vm_splat_array(flag, ary);
5654}
5655
5656static VALUE
5657vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5658{
5659 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5660
5661 if (flag & VM_CHECKMATCH_ARRAY) {
5662 long i;
5663 const long n = RARRAY_LEN(pattern);
5664
5665 for (i = 0; i < n; i++) {
5666 VALUE v = RARRAY_AREF(pattern, i);
5667 VALUE c = check_match(ec, v, target, type);
5668
5669 if (RTEST(c)) {
5670 return c;
5671 }
5672 }
5673 return Qfalse;
5674 }
5675 else {
5676 return check_match(ec, pattern, target, type);
5677 }
5678}
5679
5680VALUE
5681rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5682{
5683 return vm_check_match(ec, target, pattern, flag);
5684}
5685
5686static VALUE
5687vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5688{
5689 const VALUE kw_bits = *(ep - bits);
5690
5691 if (FIXNUM_P(kw_bits)) {
5692 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5693 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5694 return Qfalse;
5695 }
5696 else {
5697 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5698 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5699 }
5700 return Qtrue;
5701}
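/* A rough example, assuming indexes follow declaration order: for
 * `def m(a: 1, b: 2)` called as m(b: 3), the unspecified-bits Fixnum has
 * bit 0 set (for :a), so idx 0 yields Qfalse (evaluate the default) while
 * idx 1 yields Qtrue (:b was supplied). Past KW_SPECIFIED_BITS_MAX
 * keywords, a Hash of unspecified indexes replaces the Fixnum bitmap. */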
5702
5703static void
5704vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5705{
5706 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5707 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5708 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5709 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5710
5711 switch (flag) {
5712 case RUBY_EVENT_CALL:
5713 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5714 return;
5715 case RUBY_EVENT_C_CALL:
5716 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5717 return;
5718 case RUBY_EVENT_RETURN:
5719 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5720            return;
5721          case RUBY_EVENT_C_RETURN:
5722            RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5723 return;
5724 }
5725 }
5726}
5727
5728static VALUE
5729vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5730{
5731 if (!rb_const_defined_at(cbase, id)) {
5732 return 0;
5733 }
5734 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5735 return rb_public_const_get_at(cbase, id);
5736 }
5737 else {
5738 return rb_const_get_at(cbase, id);
5739 }
5740}
5741
5742static VALUE
5743vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5744{
5745 if (!RB_TYPE_P(klass, T_CLASS)) {
5746 return 0;
5747 }
5748 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5749 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5750
5751 if (tmp != super) {
5752 rb_raise(rb_eTypeError,
5753 "superclass mismatch for class %"PRIsVALUE"",
5754 rb_id2str(id));
5755 }
5756 else {
5757 return klass;
5758 }
5759 }
5760 else {
5761 return klass;
5762 }
5763}
5764
5765static VALUE
5766vm_check_if_module(ID id, VALUE mod)
5767{
5768 if (!RB_TYPE_P(mod, T_MODULE)) {
5769 return 0;
5770 }
5771 else {
5772 return mod;
5773 }
5774}
5775
5776static VALUE
5777declare_under(ID id, VALUE cbase, VALUE c)
5778{
5779 rb_set_class_path_string(c, cbase, rb_id2str(id));
5780 rb_const_set(cbase, id, c);
5781 return c;
5782}
5783
5784static VALUE
5785vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5786{
5787 /* new class declaration */
5788 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5789    VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5790    rb_define_alloc_func(c, rb_get_alloc_func(c));
5791    rb_class_inherited(s, c);
5792 return c;
5793}
5794
5795static VALUE
5796vm_declare_module(ID id, VALUE cbase)
5797{
5798 /* new module declaration */
5799 return declare_under(id, cbase, rb_module_new());
5800}
5801
5802NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5803static void
5804unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5805{
5806 VALUE name = rb_id2str(id);
5807 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5808 name, type);
5809 VALUE location = rb_const_source_location_at(cbase, id);
5810 if (!NIL_P(location)) {
5811 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5812 " previous definition of %"PRIsVALUE" was here",
5813 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5814    }
5815    rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5816}
5817
5818static VALUE
5819vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5820{
5821 VALUE klass;
5822
5823 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5824 rb_raise(rb_eTypeError,
5825 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5826 rb_obj_class(super));
5827 }
5828
5829 vm_check_if_namespace(cbase);
5830
5831 /* find klass */
5832 rb_autoload_load(cbase, id);
5833
5834 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5835 if (!vm_check_if_class(id, flags, super, klass))
5836 unmatched_redefinition("class", cbase, id, klass);
5837 return klass;
5838 }
5839 else {
5840 return vm_declare_class(id, flags, cbase, super);
5841 }
5842}
5843
5844static VALUE
5845vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5846{
5847 VALUE mod;
5848
5849 vm_check_if_namespace(cbase);
5850 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5851 if (!vm_check_if_module(id, mod))
5852 unmatched_redefinition("module", cbase, id, mod);
5853 return mod;
5854 }
5855 else {
5856 return vm_declare_module(id, cbase);
5857 }
5858}
5859
5860static VALUE
5861vm_find_or_create_class_by_id(ID id,
5862 rb_num_t flags,
5863 VALUE cbase,
5864 VALUE super)
5865{
5866 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5867
5868 switch (type) {
5869 case VM_DEFINECLASS_TYPE_CLASS:
5870 /* classdef returns class scope value */
5871 return vm_define_class(id, flags, cbase, super);
5872
5873 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5874 /* classdef returns class scope value */
5875 return rb_singleton_class(cbase);
5876
5877 case VM_DEFINECLASS_TYPE_MODULE:
5878 /* classdef returns class scope value */
5879 return vm_define_module(id, flags, cbase);
5880
5881 default:
5882 rb_bug("unknown defineclass type: %d", (int)type);
5883 }
5884}
5885
5886static rb_method_visibility_t
5887vm_scope_visibility_get(const rb_execution_context_t *ec)
5888{
5889 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5890
5891 if (!vm_env_cref_by_cref(cfp->ep)) {
5892 return METHOD_VISI_PUBLIC;
5893 }
5894 else {
5895 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5896 }
5897}
5898
5899static int
5900vm_scope_module_func_check(const rb_execution_context_t *ec)
5901{
5902 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5903
5904 if (!vm_env_cref_by_cref(cfp->ep)) {
5905 return FALSE;
5906 }
5907 else {
5908 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5909 }
5910}
5911
5912static void
5913vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5914{
5915 VALUE klass;
5916 rb_method_visibility_t visi;
5917 rb_cref_t *cref = vm_ec_cref(ec);
5918
5919 if (is_singleton) {
5920 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5921 visi = METHOD_VISI_PUBLIC;
5922 }
5923 else {
5924 klass = CREF_CLASS_FOR_DEFINITION(cref);
5925 visi = vm_scope_visibility_get(ec);
5926 }
5927
5928 if (NIL_P(klass)) {
5929 rb_raise(rb_eTypeError, "no class/module to add method");
5930 }
5931
5932 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5933    // Set max_iv_count on the class based on the number of ivar sets in its initialize method
5934 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5935 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
5936 }
5937
5938 if (!is_singleton && vm_scope_module_func_check(ec)) {
5939 klass = rb_singleton_class(klass);
5940 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5941 }
5942}
5943
5944static VALUE
5945vm_invokeblock_i(struct rb_execution_context_struct *ec,
5946 struct rb_control_frame_struct *reg_cfp,
5947 struct rb_calling_info *calling)
5948{
5949 const struct rb_callinfo *ci = calling->cd->ci;
5950 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5951
5952 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5953 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5954 }
5955 else {
5956 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5957 }
5958}
5959
5960enum method_explorer_type {
5961 mexp_search_method,
5962 mexp_search_invokeblock,
5963 mexp_search_super,
5964};
5965
5966static inline VALUE
5967vm_sendish(
5968 struct rb_execution_context_struct *ec,
5969 struct rb_control_frame_struct *reg_cfp,
5970 struct rb_call_data *cd,
5971 VALUE block_handler,
5972 enum method_explorer_type method_explorer
5973) {
5974 VALUE val = Qundef;
5975 const struct rb_callinfo *ci = cd->ci;
5976 const struct rb_callcache *cc;
5977 int argc = vm_ci_argc(ci);
5978 VALUE recv = TOPN(argc);
5979 struct rb_calling_info calling = {
5980 .block_handler = block_handler,
5981 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5982 .recv = recv,
5983 .argc = argc,
5984 .cd = cd,
5985 };
5986
5987 switch (method_explorer) {
5988 case mexp_search_method:
5989 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5990 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5991 break;
5992 case mexp_search_super:
5993 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5994 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5995 break;
5996 case mexp_search_invokeblock:
5997 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5998 break;
5999 }
6000 return val;
6001}
6002
6003VALUE
6004rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6005{
6006 stack_check(ec);
6007
6008 struct rb_forwarding_call_data adjusted_cd;
6009 struct rb_callinfo adjusted_ci;
6010
6011 VALUE bh;
6012 VALUE val;
6013
6014 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6015 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6016
6017 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6018
6019 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6020 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6021 }
6022 }
6023 else {
6024 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6025 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6026 }
6027
6028 VM_EXEC(ec, val);
6029 return val;
6030}
6031
6032VALUE
6033rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6034{
6035 stack_check(ec);
6036 VALUE bh = VM_BLOCK_HANDLER_NONE;
6037 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6038 VM_EXEC(ec, val);
6039 return val;
6040}
6041
6042VALUE
6043rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6044{
6045 stack_check(ec);
6046 struct rb_forwarding_call_data adjusted_cd;
6047 struct rb_callinfo adjusted_ci;
6048
6049 VALUE bh;
6050 VALUE val;
6051
6052 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6053 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6054
6055 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6056
6057 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6058 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6059 }
6060 }
6061 else {
6062 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6063 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6064 }
6065
6066 VM_EXEC(ec, val);
6067 return val;
6068}
6069
6070VALUE
6071rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6072{
6073 stack_check(ec);
6074 VALUE bh = VM_BLOCK_HANDLER_NONE;
6075 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6076 VM_EXEC(ec, val);
6077 return val;
6078}
6079
6080/* object.c */
6081VALUE rb_nil_to_s(VALUE);
6082VALUE rb_true_to_s(VALUE);
6083VALUE rb_false_to_s(VALUE);
6084/* numeric.c */
6085VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6086VALUE rb_fix_to_s(VALUE);
6087/* variable.c */
6088VALUE rb_mod_to_s(VALUE);
6089VALUE rb_mod_name(VALUE);
6090
6091static VALUE
6092vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6093{
6094 int type = TYPE(recv);
6095 if (type == T_STRING) {
6096 return recv;
6097 }
6098
6099 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6100
6101 switch (type) {
6102 case T_SYMBOL:
6103 if (check_method_basic_definition(vm_cc_cme(cc))) {
6104 // rb_sym_to_s() allocates a mutable string, but since we are only
6105 // going to use this string for interpolation, it's fine to use the
6106 // frozen string.
6107 return rb_sym2str(recv);
6108 }
6109 break;
6110 case T_MODULE:
6111 case T_CLASS:
6112 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6113 // rb_mod_to_s() allocates a mutable string, but since we are only
6114 // going to use this string for interpolation, it's fine to use the
6115 // frozen string.
6116 VALUE val = rb_mod_name(recv);
6117 if (NIL_P(val)) {
6118 val = rb_mod_to_s(recv);
6119 }
6120 return val;
6121 }
6122 break;
6123 case T_NIL:
6124 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6125 return rb_nil_to_s(recv);
6126 }
6127 break;
6128 case T_TRUE:
6129 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6130 return rb_true_to_s(recv);
6131 }
6132 break;
6133 case T_FALSE:
6134 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6135 return rb_false_to_s(recv);
6136 }
6137 break;
6138 case T_FIXNUM:
6139 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6140 return rb_fix_to_s(recv);
6141 }
6142 break;
6143 }
6144 return Qundef;
6145}
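/* This backs the objtostring insn emitted for string interpolation, e.g.
 * "id: #{obj}". Each branch only short-circuits while the receiver's to_s
 * is still the basic definition; otherwise Qundef tells the caller to
 * perform a real method call. */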
6146
6147static VALUE
6148vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6149{
6150 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6151 return ary;
6152 }
6153 else {
6154 return Qundef;
6155 }
6156}
6157
6158static VALUE
6159vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6160{
6161 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6162 return hash;
6163 }
6164 else {
6165 return Qundef;
6166 }
6167}
6168
6169static VALUE
6170vm_opt_str_freeze(VALUE str, int bop, ID id)
6171{
6172 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6173 return str;
6174 }
6175 else {
6176 return Qundef;
6177 }
6178}
6179
6180/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6181#define id_cmp idCmp
6182
6183static VALUE
6184vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6185{
6186 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6187 return rb_ary_includes(ary, target);
6188 }
6189 else {
6190 VALUE args[1] = {target};
6191
6192 // duparray
6193 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6194 VALUE dupary = rb_ary_resurrect(ary);
6195
6196 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6197 }
6198}
6199
6200VALUE
6201rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6202{
6203 return vm_opt_duparray_include_p(ec, ary, target);
6204}
6205
6206static VALUE
6207vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6208{
6209 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6210 if (num == 0) {
6211 return Qnil;
6212 }
6213 else {
6214 VALUE result = *ptr;
6215 rb_snum_t i = num - 1;
6216 while (i-- > 0) {
6217 const VALUE v = *++ptr;
6218 if (OPTIMIZED_CMP(v, result) > 0) {
6219 result = v;
6220 }
6221 }
6222 return result;
6223 }
6224 }
6225 else {
6226 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6227 }
6228}
6229
6230VALUE
6231rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6232{
6233 return vm_opt_newarray_max(ec, num, ptr);
6234}
6235
6236static VALUE
6237vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6238{
6239 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6240 if (num == 0) {
6241 return Qnil;
6242 }
6243 else {
6244 VALUE result = *ptr;
6245 rb_snum_t i = num - 1;
6246 while (i-- > 0) {
6247 const VALUE v = *++ptr;
6248 if (OPTIMIZED_CMP(v, result) < 0) {
6249 result = v;
6250 }
6251 }
6252 return result;
6253 }
6254 }
6255 else {
6256 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6257 }
6258}
6259
6260VALUE
6261rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6262{
6263 return vm_opt_newarray_min(ec, num, ptr);
6264}
6265
6266static VALUE
6267vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6268{
6269 // If Array#hash is _not_ monkeypatched, use the optimized call
6270 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6271 return rb_ary_hash_values(num, ptr);
6272 }
6273 else {
6274 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6275 }
6276}
6277
6278VALUE
6279rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6280{
6281 return vm_opt_newarray_hash(ec, num, ptr);
6282}
6283
6284VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6285VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6286
6287static VALUE
6288vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6289{
6290 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6291 struct RArray fake_ary;
6292 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6293 return rb_ary_includes(ary, target);
6294 }
6295 else {
6296 VALUE args[1] = {target};
6297 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6298 }
6299}
6300
6301VALUE
6302rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6303{
6304 return vm_opt_newarray_include_p(ec, num, ptr, target);
6305}
6306
6307static VALUE
6308vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6309{
6310 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6311 struct RArray fake_ary;
6312 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6313 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6314 }
6315 else {
6316 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6317        // Set up an argv array with room for the keyword hash.
6318 VALUE args[2];
6319 args[0] = fmt;
6320 int kw_splat = RB_NO_KEYWORDS;
6321 int argc = 1;
6322
6323 if (!UNDEF_P(buffer)) {
6324 args[1] = rb_hash_new_with_size(1);
6325 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6326 kw_splat = RB_PASS_KEYWORDS;
6327 argc++;
6328 }
6329
6330 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6331 }
6332}
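/* Roughly: `[a, b].pack("C*", buffer: buf)` reaches the else branch when
 * Array#pack has been redefined; the buffer keyword, dropped by
 * opt_newarray_send, is re-materialized as a real Hash before the generic
 * call is made. */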
6333
6334VALUE
6335rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6336{
6337 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6338}
6339
6340VALUE
6341rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6342{
6343 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6344}
6345
6346#undef id_cmp
6347
6348static void
6349vm_track_constant_cache(ID id, void *ic)
6350{
6351 rb_vm_t *vm = GET_VM();
6352 struct rb_id_table *const_cache = vm->constant_cache;
6353 VALUE lookup_result;
6354 set_table *ics;
6355
6356 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6357 ics = (set_table *)lookup_result;
6358 }
6359 else {
6360 ics = set_init_numtable();
6361 rb_id_table_insert(const_cache, id, (VALUE)ics);
6362 }
6363
6364 /* The call below to st_insert could allocate which could trigger a GC.
6365 * If it triggers a GC, it may free an iseq that also holds a cache to this
6366 * constant. If that iseq is the last iseq with a cache to this constant, then
6367 * it will free this ST table, which would cause a use-after-free during this
6368 * st_insert.
6369 *
6370 * So to fix this issue, we store the ID that is currently being inserted
6371 * and, in remove_from_constant_cache, we don't free the ST table for ID
6372 * equal to this one.
6373 *
6374 * See [Bug #20921].
6375 */
6376 vm->inserting_constant_cache_id = id;
6377
6378 set_insert(ics, (st_data_t)ic);
6379
6380 vm->inserting_constant_cache_id = (ID)0;
6381}
6382
6383static void
6384vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6385{
6386 RB_VM_LOCKING() {
6387 for (int i = 0; segments[i]; i++) {
6388 ID id = segments[i];
6389 if (id == idNULL) continue;
6390 vm_track_constant_cache(id, ic);
6391 }
6392 }
6393}
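/* E.g. Foo::Bar is compiled with the 0-terminated segment list
 * {idFoo, idBar, 0}; every trackable segment registers this IC in
 * vm->constant_cache so that a later redefinition anywhere along the path
 * can invalidate the cached entry. */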
6394
6395// For JIT inlining
6396static inline bool
6397vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6398{
6399 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6400 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6401
6402 return (ic_cref == NULL || // no need to check CREF
6403 ic_cref == vm_get_cref(reg_ep));
6404 }
6405 return false;
6406}
6407
6408static bool
6409vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6410{
6411 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6412 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6413}
6414
6415// YJIT needs this function to never allocate and never raise
6416bool
6417rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6418{
6419 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6420}
6421
6422static void
6423vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6424{
6425 if (ruby_vm_const_missing_count > 0) {
6426 ruby_vm_const_missing_count = 0;
6427 ic->entry = NULL;
6428 return;
6429 }
6430
6431 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6432 RB_OBJ_WRITE(ice, &ice->value, val);
6433 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6434 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6435 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6436
6437 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6438 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6439 rb_yjit_constant_ic_update(iseq, ic, pos);
6440}
6441
6442VALUE
6443rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6444{
6445 VALUE val;
6446 const ID *segments = ic->segments;
6447 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6448 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6449 val = ice->value;
6450
6451 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6452 }
6453 else {
6454 ruby_vm_constant_cache_misses++;
6455 val = vm_get_ev_const_chain(ec, segments);
6456 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6457        // Undo the PC increment to get the address of this instruction
6458 // INSN_ATTR(width) == 2
6459 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6460 }
6461 return val;
6462}
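/* The first execution of, say, Foo::Bar misses: the chain is resolved,
 * the IC is registered for invalidation, and the value is cached against
 * the rewound PC. Later executions return ice->value straight away, as
 * long as the cref and ractor-shareability checks in vm_ic_hit_p() pass. */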
6463
6464static VALUE
6465vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6466{
6467 rb_thread_t *th = rb_ec_thread_ptr(ec);
6468 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6469
6470 again:
6471 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6472 return is->once.value;
6473 }
6474 else if (is->once.running_thread == NULL) {
6475 VALUE val;
6476 is->once.running_thread = th;
6477 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6478 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6479 /* is->once.running_thread is cleared by vm_once_clear() */
6480 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6481 return val;
6482 }
6483 else if (is->once.running_thread == th) {
6484 /* recursive once */
6485 return vm_once_exec((VALUE)iseq);
6486 }
6487 else {
6488 /* waiting for finish */
6489        RUBY_VM_CHECK_INTS(ec);
6490        rb_thread_schedule();
6491        goto again;
6492 }
6493}
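/* This implements the `once` insn, used e.g. for o-flagged regexp literals
 * such as /#{pat}/o whose interpolation must run at most once. Losing
 * threads spin via RUBY_VM_CHECK_INTS() and rb_thread_schedule() until the
 * winner publishes RUNNING_THREAD_ONCE_DONE, while a thread re-entering
 * its own pending `once` (recursion) simply runs the body again. */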
6494
6495static OFFSET
6496vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6497{
6498 switch (OBJ_BUILTIN_TYPE(key)) {
6499 case -1:
6500 case T_FLOAT:
6501 case T_SYMBOL:
6502 case T_BIGNUM:
6503 case T_STRING:
6504 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6505 SYMBOL_REDEFINED_OP_FLAG |
6506 INTEGER_REDEFINED_OP_FLAG |
6507 FLOAT_REDEFINED_OP_FLAG |
6508 NIL_REDEFINED_OP_FLAG |
6509 TRUE_REDEFINED_OP_FLAG |
6510 FALSE_REDEFINED_OP_FLAG |
6511 STRING_REDEFINED_OP_FLAG)) {
6512 st_data_t val;
6513 if (RB_FLOAT_TYPE_P(key)) {
6514 double kval = RFLOAT_VALUE(key);
6515 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6516 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6517 }
6518 }
6519 if (rb_hash_stlike_lookup(hash, key, &val)) {
6520 return FIX2LONG((VALUE)val);
6521 }
6522 else {
6523 return else_offset;
6524 }
6525 }
6526 }
6527 return 0;
6528}
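/* E.g. `case 1.0 ... when 1` still hits the CDHASH: an integral Float key
 * is normalized via modf() to the Integer it equals, matching Integer#===
 * semantics. Returning 0 (no dispatch) makes the insn fall back to the
 * compiled sequence of === checks, which happens whenever any relevant
 * === may have been redefined. */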
6529
6530NORETURN(static void
6531 vm_stack_consistency_error(const rb_execution_context_t *ec,
6532 const rb_control_frame_t *,
6533 const VALUE *));
6534static void
6535vm_stack_consistency_error(const rb_execution_context_t *ec,
6536 const rb_control_frame_t *cfp,
6537 const VALUE *bp)
6538{
6539 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6540 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6541 static const char stack_consistency_error[] =
6542 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6543#if defined RUBY_DEVEL
6544 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6545 rb_str_cat_cstr(mesg, "\n");
6546    rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6547    rb_bug("%s", StringValueCStr(mesg));
6548#else
6549 rb_bug(stack_consistency_error, nsp, nbp);
6550#endif
6551}
6552
6553static VALUE
6554vm_opt_plus(VALUE recv, VALUE obj)
6555{
6556 if (FIXNUM_2_P(recv, obj) &&
6557 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6558 return rb_fix_plus_fix(recv, obj);
6559 }
6560 else if (FLONUM_2_P(recv, obj) &&
6561 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6562 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6563 }
6564 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6565 return Qundef;
6566 }
6567 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6568 RBASIC_CLASS(obj) == rb_cFloat &&
6569 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6570 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6571 }
6572 else if (RBASIC_CLASS(recv) == rb_cString &&
6573 RBASIC_CLASS(obj) == rb_cString &&
6574 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6575 return rb_str_opt_plus(recv, obj);
6576 }
6577 else if (RBASIC_CLASS(recv) == rb_cArray &&
6578 RBASIC_CLASS(obj) == rb_cArray &&
6579 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6580 return rb_ary_plus(recv, obj);
6581 }
6582 else {
6583 return Qundef;
6584 }
6585}
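/* Fast-path example: 1 + 2 returns rb_fix_plus_fix() with no method
 * dispatch. Defining a custom Integer#+ clears the corresponding
 * BASIC_OP_UNREDEFINED_P bit, so every branch then yields Qundef and the
 * interpreter sends a real +. The same guard pattern repeats in the
 * vm_opt_* arithmetic helpers below. */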
6586
6587static VALUE
6588vm_opt_minus(VALUE recv, VALUE obj)
6589{
6590 if (FIXNUM_2_P(recv, obj) &&
6591 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6592 return rb_fix_minus_fix(recv, obj);
6593 }
6594 else if (FLONUM_2_P(recv, obj) &&
6595 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6596 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6597 }
6598 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6599 return Qundef;
6600 }
6601 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6602 RBASIC_CLASS(obj) == rb_cFloat &&
6603 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6604 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6605 }
6606 else {
6607 return Qundef;
6608 }
6609}
6610
6611static VALUE
6612vm_opt_mult(VALUE recv, VALUE obj)
6613{
6614 if (FIXNUM_2_P(recv, obj) &&
6615 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6616 return rb_fix_mul_fix(recv, obj);
6617 }
6618 else if (FLONUM_2_P(recv, obj) &&
6619 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6620 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6621 }
6622 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6623 return Qundef;
6624 }
6625 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6626 RBASIC_CLASS(obj) == rb_cFloat &&
6627 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6628 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6629 }
6630 else {
6631 return Qundef;
6632 }
6633}
6634
6635static VALUE
6636vm_opt_div(VALUE recv, VALUE obj)
6637{
6638 if (FIXNUM_2_P(recv, obj) &&
6639 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6640 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6641 }
6642 else if (FLONUM_2_P(recv, obj) &&
6643 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6644 return rb_flo_div_flo(recv, obj);
6645 }
6646 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6647 return Qundef;
6648 }
6649 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6650 RBASIC_CLASS(obj) == rb_cFloat &&
6651 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6652 return rb_flo_div_flo(recv, obj);
6653 }
6654 else {
6655 return Qundef;
6656 }
6657}
6658
6659static VALUE
6660vm_opt_mod(VALUE recv, VALUE obj)
6661{
6662 if (FIXNUM_2_P(recv, obj) &&
6663 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6664 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6665 }
6666 else if (FLONUM_2_P(recv, obj) &&
6667 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6668 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6669 }
6670 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6671 return Qundef;
6672 }
6673 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6674 RBASIC_CLASS(obj) == rb_cFloat &&
6675 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6676 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6677 }
6678 else {
6679 return Qundef;
6680 }
6681}
6682
6683static VALUE
6684vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6685{
6686 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6687 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6688
6689 if (!UNDEF_P(val)) {
6690 return RBOOL(!RTEST(val));
6691 }
6692 }
6693
6694 return Qundef;
6695}
6696
6697static VALUE
6698vm_opt_lt(VALUE recv, VALUE obj)
6699{
6700 if (FIXNUM_2_P(recv, obj) &&
6701 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6702 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6703 }
6704 else if (FLONUM_2_P(recv, obj) &&
6705 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6706 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6707 }
6708 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6709 return Qundef;
6710 }
6711 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6712 RBASIC_CLASS(obj) == rb_cFloat &&
6713 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6714 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6715 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6716 }
6717 else {
6718 return Qundef;
6719 }
6720}
6721
6722static VALUE
6723vm_opt_le(VALUE recv, VALUE obj)
6724{
6725 if (FIXNUM_2_P(recv, obj) &&
6726 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6727 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6728 }
6729 else if (FLONUM_2_P(recv, obj) &&
6730 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6731 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6732 }
6733 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6734 return Qundef;
6735 }
6736 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6737 RBASIC_CLASS(obj) == rb_cFloat &&
6738 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6739 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6740 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6741 }
6742 else {
6743 return Qundef;
6744 }
6745}
6746
6747static VALUE
6748vm_opt_gt(VALUE recv, VALUE obj)
6749{
6750 if (FIXNUM_2_P(recv, obj) &&
6751 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6752 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6753 }
6754 else if (FLONUM_2_P(recv, obj) &&
6755 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6756 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6757 }
6758 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6759 return Qundef;
6760 }
6761 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6762 RBASIC_CLASS(obj) == rb_cFloat &&
6763 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6764 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6765 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6766 }
6767 else {
6768 return Qundef;
6769 }
6770}
6771
6772static VALUE
6773vm_opt_ge(VALUE recv, VALUE obj)
6774{
6775 if (FIXNUM_2_P(recv, obj) &&
6776 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6777 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6778 }
6779 else if (FLONUM_2_P(recv, obj) &&
6780 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6781 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6782 }
6783 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6784 return Qundef;
6785 }
6786 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6787 RBASIC_CLASS(obj) == rb_cFloat &&
6788 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6789 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6790 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6791 }
6792 else {
6793 return Qundef;
6794 }
6795}
6796
6797
6798static VALUE
6799vm_opt_ltlt(VALUE recv, VALUE obj)
6800{
6801 if (SPECIAL_CONST_P(recv)) {
6802 return Qundef;
6803 }
6804 else if (RBASIC_CLASS(recv) == rb_cString &&
6805 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6806 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6807 return rb_str_buf_append(recv, obj);
6808 }
6809 else {
6810 return rb_str_concat(recv, obj);
6811 }
6812 }
6813 else if (RBASIC_CLASS(recv) == rb_cArray &&
6814 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6815 return rb_ary_push(recv, obj);
6816 }
6817 else {
6818 return Qundef;
6819 }
6820}
6821
6822static VALUE
6823vm_opt_and(VALUE recv, VALUE obj)
6824{
6825 // If recv and obj are both fixnums, then the bottom tag bit
6826 // will be 1 on both. 1 & 1 == 1, so the result value will also
6827 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6828 // will be 0, and we return Qundef.
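    // Worked example: INT2FIX(5) == 0b1011 and INT2FIX(3) == 0b0111, so the
    // raw AND is 0b0011 == INT2FIX(1) -- exactly 5 & 3. A non-fixnum operand
    // contributes a 0 tag bit, making the FIXNUM_P(ret) check below fail.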
6829 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6830
6831 if (FIXNUM_P(ret) &&
6832 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6833 return ret;
6834 }
6835 else {
6836 return Qundef;
6837 }
6838}
6839
6840static VALUE
6841vm_opt_or(VALUE recv, VALUE obj)
6842{
6843 if (FIXNUM_2_P(recv, obj) &&
6844 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6845 return recv | obj;
6846 }
6847 else {
6848 return Qundef;
6849 }
6850}
6851
6852static VALUE
6853vm_opt_aref(VALUE recv, VALUE obj)
6854{
6855 if (SPECIAL_CONST_P(recv)) {
6856 if (FIXNUM_2_P(recv, obj) &&
6857 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6858 return rb_fix_aref(recv, obj);
6859 }
6860 return Qundef;
6861 }
6862 else if (RBASIC_CLASS(recv) == rb_cArray &&
6863 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6864 if (FIXNUM_P(obj)) {
6865 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6866 }
6867 else {
6868 return rb_ary_aref1(recv, obj);
6869 }
6870 }
6871 else if (RBASIC_CLASS(recv) == rb_cHash &&
6872 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6873 return rb_hash_aref(recv, obj);
6874 }
6875 else {
6876 return Qundef;
6877 }
6878}
6879
6880static VALUE
6881vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6882{
6883 if (SPECIAL_CONST_P(recv)) {
6884 return Qundef;
6885 }
6886 else if (RBASIC_CLASS(recv) == rb_cArray &&
6887 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6888 FIXNUM_P(obj)) {
6889 rb_ary_store(recv, FIX2LONG(obj), set);
6890 return set;
6891 }
6892 else if (RBASIC_CLASS(recv) == rb_cHash &&
6893 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6894 rb_hash_aset(recv, obj, set);
6895 return set;
6896 }
6897 else {
6898 return Qundef;
6899 }
6900}
6901
6902static VALUE
6903vm_opt_aref_with(VALUE recv, VALUE key)
6904{
6905 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6906 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6907 rb_hash_compare_by_id_p(recv) == Qfalse &&
6908 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6909 return rb_hash_aref(recv, key);
6910 }
6911 else {
6912 return Qundef;
6913 }
6914}
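/* This backs opt_aref_with, i.e. hash["some literal"]: the frozen key from
 * the iseq is used directly, saving a String allocation on every lookup.
 * compare_by_identity hashes and hashes with a default proc are excluded,
 * conservatively, since both can observe the key object itself. */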
6915
6916VALUE
6917rb_vm_opt_aref_with(VALUE recv, VALUE key)
6918{
6919 return vm_opt_aref_with(recv, key);
6920}
6921
6922static VALUE
6923vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6924{
6925 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6926 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6927 rb_hash_compare_by_id_p(recv) == Qfalse) {
6928 return rb_hash_aset(recv, key, val);
6929 }
6930 else {
6931 return Qundef;
6932 }
6933}
6934
6935VALUE
6936rb_vm_opt_aset_with(VALUE recv, VALUE key, VALUE value)
6937{
6938 return vm_opt_aset_with(recv, key, value);
6939}
6940
6941static VALUE
6942vm_opt_length(VALUE recv, int bop)
6943{
6944 if (SPECIAL_CONST_P(recv)) {
6945 return Qundef;
6946 }
6947 else if (RBASIC_CLASS(recv) == rb_cString &&
6948 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
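        /* empty? only compares the result against 0, so the cheap byte
         * length suffices; a real String#length needs rb_str_length()'s
         * character count. */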
6949 if (bop == BOP_EMPTY_P) {
6950 return LONG2NUM(RSTRING_LEN(recv));
6951 }
6952 else {
6953 return rb_str_length(recv);
6954 }
6955 }
6956 else if (RBASIC_CLASS(recv) == rb_cArray &&
6957 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6958 return LONG2NUM(RARRAY_LEN(recv));
6959 }
6960 else if (RBASIC_CLASS(recv) == rb_cHash &&
6961 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6962 return INT2FIX(RHASH_SIZE(recv));
6963 }
6964 else {
6965 return Qundef;
6966 }
6967}
6968
6969static VALUE
6970vm_opt_empty_p(VALUE recv)
6971{
6972 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6973 case Qundef: return Qundef;
6974 case INT2FIX(0): return Qtrue;
6975 default: return Qfalse;
6976 }
6977}
6978
6979VALUE rb_false(VALUE obj);
6980
6981static VALUE
6982vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6983{
6984 if (NIL_P(recv) &&
6985 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6986 return Qtrue;
6987 }
6988 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6989 return Qfalse;
6990 }
6991 else {
6992 return Qundef;
6993 }
6994}
6995
6996static VALUE
6997fix_succ(VALUE x)
6998{
6999 switch (x) {
7000 case ~0UL:
7001 /* 0xFFFF_FFFF == INT2FIX(-1)
7002 * `-1.succ` is of course 0. */
7003 return INT2FIX(0);
7004 case RSHIFT(~0UL, 1):
7005 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7006 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7007 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7008 default:
7009 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7010 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7011 * == lx*2 + ly*2 + 1
7012 * == (lx*2+1) + (ly*2+1) - 1
7013 * == x + y - 1
7014 *
7015 * Here, if we put y := INT2FIX(1):
7016 *
7017 * == x + INT2FIX(1) - 1
7018 * == x + 2 .
7019 */
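        /* Quick check of the arithmetic: INT2FIX(3) == 7,
         * and 7 + 2 == 9 == INT2FIX(4). */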
7020 return x + 2;
7021 }
7022}
7023
7024static VALUE
7025vm_opt_succ(VALUE recv)
7026{
7027 if (FIXNUM_P(recv) &&
7028 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7029 return fix_succ(recv);
7030 }
7031 else if (SPECIAL_CONST_P(recv)) {
7032 return Qundef;
7033 }
7034 else if (RBASIC_CLASS(recv) == rb_cString &&
7035 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7036 return rb_str_succ(recv);
7037 }
7038 else {
7039 return Qundef;
7040 }
7041}
7042
7043static VALUE
7044vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7045{
7046 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7047 return RBOOL(!RTEST(recv));
7048 }
7049 else {
7050 return Qundef;
7051 }
7052}
7053
7054static VALUE
7055vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7056{
7057 if (SPECIAL_CONST_P(recv)) {
7058 return Qundef;
7059 }
7060 else if (RBASIC_CLASS(recv) == rb_cString &&
7061 CLASS_OF(obj) == rb_cRegexp &&
7062 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7063 return rb_reg_match(obj, recv);
7064 }
7065 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7066 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7067 return rb_reg_match(recv, obj);
7068 }
7069 else {
7070 return Qundef;
7071 }
7072}
7073
7074rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7075
7076NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7077
7078static inline void
7079vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7080 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7081 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7082{
7083 rb_event_flag_t event = pc_events & target_event;
7084 VALUE self = GET_SELF();
7085
7086 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7087
7088 if (event & global_hooks->events) {
7089 /* increment PC because source line is calculated with PC-1 */
7090 reg_cfp->pc++;
7091 vm_dtrace(event, ec);
7092 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7093 reg_cfp->pc--;
7094 }
7095
7096    // Load here since the global hook above can add and free local hooks
7097 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7098 if (local_hooks != NULL) {
7099 if (event & local_hooks->events) {
7100 /* increment PC because source line is calculated with PC-1 */
7101 reg_cfp->pc++;
7102 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7103 reg_cfp->pc--;
7104 }
7105 }
7106}
7107
7108#define VM_TRACE_HOOK(target_event, val) do { \
7109 if ((pc_events & (target_event)) & enabled_flags) { \
7110 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7111 } \
7112} while (0)
7113
7114static VALUE
7115rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7116{
7117 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7118 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7119 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7120}
7121
7122static void
7123vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7124{
7125 const VALUE *pc = reg_cfp->pc;
7126 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7127 rb_event_flag_t global_events = enabled_flags;
7128
7129 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7130 return;
7131 }
7132 else {
7133 const rb_iseq_t *iseq = reg_cfp->iseq;
7134 VALUE iseq_val = (VALUE)iseq;
7135 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7136 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7137 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7138 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7139 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7140 rb_hook_list_t *bmethod_local_hooks = NULL;
7141 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7142 rb_event_flag_t bmethod_local_events = 0;
7143 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7144 enabled_flags |= iseq_local_events;
7145
7146 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7147
7148 if (bmethod_frame) {
7149 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7150 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7151 bmethod_local_hooks = me->def->body.bmethod.hooks;
7152 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7153 if (bmethod_local_hooks) {
7154 bmethod_local_events = bmethod_local_hooks->events;
7155 }
7156 }
7157
7158
7159 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7160#if 0
7161 /* disable trace */
7162 /* TODO: incomplete */
7163 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7164#else
7165 /* do not disable trace because of performance problem
7166 * (re-enable overhead)
7167 */
7168#endif
7169 return;
7170 }
7171 else if (ec->trace_arg != NULL) {
7172 /* already tracing */
7173 return;
7174 }
7175 else {
7176 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7177 /* Note, not considering iseq local events here since the same
7178 * iseq could be used in multiple bmethods. */
7179 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7180
7181 if (0) {
7182 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7183 (int)pos,
7184 (int)pc_events,
7185 RSTRING_PTR(rb_iseq_path(iseq)),
7186 (int)rb_iseq_line_no(iseq, pos),
7187 RSTRING_PTR(rb_iseq_label(iseq)));
7188 }
7189 VM_ASSERT(reg_cfp->pc == pc);
7190 VM_ASSERT(pc_events != 0);
7191
7192 /* check traces */
7193 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7194 /* b_call instruction running as a method. Fire call event. */
7195 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7196 }
7198 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7199 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7200 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7201 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7202 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7203 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7204 /* b_return instruction running as a method. Fire return event. */
7205 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7206 }
7207
7208 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7209 // We need the pointer to stay valid in case compaction happens in a trace hook.
7210 //
7211 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7212 // storage for `rb_method_definition_t` is not on the GC heap.
7213 RB_GC_GUARD(iseq_val);
7214 }
7215 }
7216}
7217#undef VM_TRACE_HOOK
7218
7219#if VM_CHECK_MODE > 0
7220NORETURN( NOINLINE( COLDFUNC
7221void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7222
7223void
7224Init_vm_stack_canary(void)
7225{
7226 /* This has to be called _after_ our PRNG is properly set up. */
7227 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7228 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7229
7230 vm_stack_canary_was_born = true;
7231 VM_ASSERT(n == 0);
7232}
7233
7234void
7235rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7236{
7237 /* Because a method has already been called, why not call
7238 * another one. */
7239 const char *insn = rb_insns_name(i);
7240 VALUE inspection = rb_inspect(c);
7241 const char *str = StringValueCStr(inspection);
7242
7243 rb_bug("dead canary found at %s: %s", insn, str);
7244}
7245
7246#else
7247void Init_vm_stack_canary(void) { /* nothing to do */ }
7248#endif
7249
7250
7251/* part of the following code is generated by this ruby script:
7252
725316.times{|i|
7254 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7255 typedef_args.prepend(", ") if i != 0
7256 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7257 call_args.prepend(", ") if i != 0
7258 puts %Q{
7259static VALUE
7260builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7261{
7262 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7263 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7264}}
7265}
7266
7267puts
7268puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
726916.times{|i|
7270 puts " builtin_invoker#{i},"
7271}
7272puts "};"
7273*/
7274
7275static VALUE
7276builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7277{
7278 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7279 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7280}
7281
7282static VALUE
7283builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7284{
7285 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7286 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7287}
7288
7289static VALUE
7290builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7291{
7292 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7293 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7294}
7295
7296static VALUE
7297builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7298{
7299 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7300 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7301}
7302
7303static VALUE
7304builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7305{
7306 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7307 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7308}
7309
7310static VALUE
7311builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7312{
7313 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7314 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7315}
7316
7317static VALUE
7318builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7319{
7320 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7321 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7322}
7323
7324static VALUE
7325builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7326{
7327 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7328 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7329}
7330
7331static VALUE
7332builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7333{
7334 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7335 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7336}
7337
7338static VALUE
7339builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7340{
7341 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7342 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7343}
7344
7345static VALUE
7346builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7347{
7348 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7349 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7350}
7351
7352static VALUE
7353builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7354{
7355 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7356 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7357}
7358
7359static VALUE
7360builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7361{
7362 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7363 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7364}
7365
7366static VALUE
7367builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7368{
7369 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7370 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7371}
7372
7373static VALUE
7374builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7375{
7376 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7377 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7378}
7379
7380static VALUE
7381builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7382{
7383 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7384 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7385}
7386
7387typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7388
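/* Editor's note: builtins are called through this family of arity
 * trampolines. Each builtin_invokerN casts the generic rb_insn_func_t
 * back to an N-argument function type and spreads argv[0..N-1] into
 * positional parameters, so the callee receives ordinary C arguments
 * rather than an argv array.
 */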
7389static builtin_invoker
7390lookup_builtin_invoker(int argc)
7391{
7392 static const builtin_invoker invokers[] = {
7393 builtin_invoker0,
7394 builtin_invoker1,
7395 builtin_invoker2,
7396 builtin_invoker3,
7397 builtin_invoker4,
7398 builtin_invoker5,
7399 builtin_invoker6,
7400 builtin_invoker7,
7401 builtin_invoker8,
7402 builtin_invoker9,
7403 builtin_invoker10,
7404 builtin_invoker11,
7405 builtin_invoker12,
7406 builtin_invoker13,
7407 builtin_invoker14,
7408 builtin_invoker15,
7409 };
7410
7411 return invokers[argc];
7412}
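/* Note: the table above has exactly 16 entries and there is no bounds
 * check here, so callers must guarantee 0 <= argc <= 15. A minimal
 * sketch of the dispatch (kept under #if 0, i.e. not compiled;
 * `example_add` and `example_dispatch` are hypothetical names, not part
 * of this file):
 */
#if 0
static VALUE
example_add(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b)
{
    /* any two-argument builtin body would do */
    return rb_funcall(a, rb_intern("+"), 1, b);
}

static VALUE
example_dispatch(rb_execution_context_t *ec, VALUE self, const VALUE *argv)
{
    /* builtin_invoker2 spreads argv, so this ends up calling
     * example_add(ec, self, argv[0], argv[1]) */
    return lookup_builtin_invoker(2)(ec, self, argv,
                                     (rb_insn_func_t)(uintptr_t)example_add);
}
#endif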
7413
7414static inline VALUE
7415invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7416{
7417 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify the assumption made by `Primitive.attr! :leaf`
7418 SETUP_CANARY(canary_p);
7419 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7420 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7421 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7422 return ret;
7423}
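/* Editor's note: the canary is only armed for iseqs whose builtin_attrs
 * include BUILTIN_ATTR_LEAF. The SETUP_CANARY/CHECK_CANARY pair plants a
 * sentinel value on the VM stack before the call and verifies it
 * afterwards, catching a builtin that was declared `Primitive.attr! :leaf`
 * but nevertheless wrote to the VM stack, e.g. by calling back into Ruby.
 */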
7424
7425static VALUE
7426vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7427{
7428 return invoke_bf(ec, cfp, bf, argv);
7429}
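/* Editor's note: vm_invoke_builtin serves the invokebuiltin instruction
 * (as the BIN(invokebuiltin) tag passed to CHECK_CANARY suggests), with
 * argv pointing at arguments already materialized on the VM stack;
 * vm_invoke_builtin_delegate below serves the optimized delegate variant,
 * which reuses the frame's locals instead.
 */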
7430
7431static VALUE
7432vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7433{
7434 if (0) { // debug print
7435 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7436 for (int i=0; i<bf->argc; i++) {
7437 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7438 }
7439 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7440 (void *)(uintptr_t)bf->func_ptr);
7441 }
7442
7443 if (bf->argc == 0) {
7444 return invoke_bf(ec, cfp, bf, NULL);
7445 }
7446 else {
7447 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7448 return invoke_bf(ec, cfp, bf, argv);
7449 }
7450}
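/* Editor's note on the argv computation above: in an iseq frame the
 * locals sit directly below the environment data, with ep pointing at
 * the last env slot:
 *
 *   [ local_0 ... local_{n-1} ][ env data: VM_ENV_DATA_SIZE slots ]
 *                                                              ^ ep
 *
 * so `ep - local_table_size - VM_ENV_DATA_SIZE + 1` is the address of
 * local_0, and adding start_index skips the leading locals. The delegate
 * variant thus hands the builtin the Ruby frame's own locals without
 * copying them.
 */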
7451
7452// for __builtin_inline!()
7453
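/* Editor's note: C code embedded with __builtin_inline!() runs inside the
 * frame of the Ruby method that contains it. The generated C reads that
 * method's locals through this accessor, using an EP-relative slot index
 * computed by the builtin loader at build time (the exact index scheme is
 * the loader's concern, not this file's).
 */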
7454VALUE
7455rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7456{
7457 const rb_control_frame_t *cfp = ec->cfp;
7458 return cfp->ep[index];
7459}
7460