Ruby 3.5.0dev (2025-02-20 revision 34098b669c0cbc024cd08e686891f1dfe0a10aaf)
vm_insnhelper.c (34098b669c0cbc024cd08e686891f1dfe0a10aaf)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/struct.h"
30#include "variable.h"
31
32/* finish iseq array */
33#include "insns.inc"
34#include "insns_info.inc"
35
36extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
37extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
38extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
39extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
40 int argc, const VALUE *argv, int priv);
41
42static const struct rb_callcache vm_empty_cc;
43static const struct rb_callcache vm_empty_cc_for_super;
44
45/* control stack frame */
46
47static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
48
49static VALUE
50ruby_vm_special_exception_copy(VALUE exc)
51{
52    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
53    rb_obj_copy_ivar(e, exc);
54 return e;
55}
56
57NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
58static void
59ec_stack_overflow(rb_execution_context_t *ec, int setup)
60{
61 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
62 ec->raised_flag = RAISED_STACKOVERFLOW;
63 if (setup) {
64 VALUE at = rb_ec_backtrace_object(ec);
65 mesg = ruby_vm_special_exception_copy(mesg);
66 rb_ivar_set(mesg, idBt, at);
67 rb_ivar_set(mesg, idBt_locations, at);
68 }
69 ec->errinfo = mesg;
70 EC_JUMP_TAG(ec, TAG_RAISE);
71}
72
73NORETURN(static void vm_stackoverflow(void));
74
75static void
76vm_stackoverflow(void)
77{
78 ec_stack_overflow(GET_EC(), TRUE);
79}
80
81NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
82void
83rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
84{
85 if (rb_during_gc()) {
86 rb_bug("system stack overflow during GC. Faulty native extension?");
87 }
88 if (crit) {
89 ec->raised_flag = RAISED_STACKOVERFLOW;
90 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91 EC_JUMP_TAG(ec, TAG_RAISE);
92 }
93#ifdef USE_SIGALTSTACK
94 ec_stack_overflow(ec, TRUE);
95#else
96 ec_stack_overflow(ec, FALSE);
97#endif
98}
99
100static inline void stack_check(rb_execution_context_t *ec);
101
102#if VM_CHECK_MODE > 0
103static int
104callable_class_p(VALUE klass)
105{
106#if VM_CHECK_MODE >= 2
107 if (!klass) return FALSE;
108 switch (RB_BUILTIN_TYPE(klass)) {
109 default:
110 break;
111 case T_ICLASS:
112 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
113 case T_MODULE:
114 return TRUE;
115 }
116 while (klass) {
117 if (klass == rb_cBasicObject) {
118 return TRUE;
119 }
120 klass = RCLASS_SUPER(klass);
121 }
122 return FALSE;
123#else
124 return klass != 0;
125#endif
126}
127
128static int
129callable_method_entry_p(const rb_callable_method_entry_t *cme)
130{
131 if (cme == NULL) {
132 return TRUE;
133 }
134 else {
135 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
136
137 if (callable_class_p(cme->defined_class)) {
138 return TRUE;
139 }
140 else {
141 return FALSE;
142 }
143 }
144}
145
146static void
147vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
148{
149 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
150 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
151
152 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
153 cref_or_me_type = imemo_type(cref_or_me);
154 }
155 if (type & VM_FRAME_FLAG_BMETHOD) {
156 req_me = TRUE;
157 }
158
159 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
160 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
161 }
162 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
163 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
164 }
165
166 if (req_me) {
167 if (cref_or_me_type != imemo_ment) {
168 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
169 }
170 }
171 else {
172 if (req_cref && cref_or_me_type != imemo_cref) {
173 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
174 }
175 else { /* cref or Qfalse */
176 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
177 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
178 /* ignore */
179 }
180 else {
181 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
182 }
183 }
184 }
185 }
186
187 if (cref_or_me_type == imemo_ment) {
188 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
189
190 if (!callable_method_entry_p(me)) {
191 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
192 }
193 }
194
195 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
196 VM_ASSERT(iseq == NULL ||
197 RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
198              RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
199 );
200 }
201 else {
202 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
203 }
204}
205
206static void
207vm_check_frame(VALUE type,
208 VALUE specval,
209 VALUE cref_or_me,
210 const rb_iseq_t *iseq)
211{
212 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
213 VM_ASSERT(FIXNUM_P(type));
214
215#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
216 case magic: \
217 vm_check_frame_detail(type, req_block, req_me, req_cref, \
218 specval, cref_or_me, is_cframe, iseq); \
219 break
220 switch (given_magic) {
221 /* BLK ME CREF CFRAME */
222 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
223 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
224 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
225 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
226 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
227 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
228 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
229 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
230 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
231 default:
232 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
233 }
234#undef CHECK
235}
236
237static VALUE vm_stack_canary; /* Initialized later */
238static bool vm_stack_canary_was_born = false;
239
240// Return the index of the instruction right before the given PC.
241// This is needed because insn_entry advances PC before the insn body.
242static unsigned int
243previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
244{
245 unsigned int pos = 0;
246 while (pos < ISEQ_BODY(iseq)->iseq_size) {
247 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
248 unsigned int next_pos = pos + insn_len(opcode);
249 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
250 return pos;
251 }
252 pos = next_pos;
253 }
254 rb_bug("failed to find the previous insn");
255}
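/* Illustrative sketch (not part of the original source; opcode names and
 * lengths are examples only): if the encoded stream were
 * [getlocal, idx, level, leave] with insn_len(getlocal) == 3 and
 * insn_len(leave) == 1, then a PC pointing at the `leave` slot would make
 * previous_insn_index() return 0, the index of `getlocal`. */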
256
257void
258rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
259{
260 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
261 const struct rb_iseq_struct *iseq;
262
263 if (! LIKELY(vm_stack_canary_was_born)) {
264 return; /* :FIXME: isn't it rather fatal to enter this branch? */
265 }
266 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
267 /* This is at the very beginning of a thread. cfp does not exist. */
268 return;
269 }
270 else if (! (iseq = GET_ISEQ())) {
271 return;
272 }
273 else if (LIKELY(sp[0] != vm_stack_canary)) {
274 return;
275 }
276 else {
277 /* we are going to call methods below; squash the canary to
278     * prevent an infinite loop. */
279 sp[0] = Qundef;
280 }
281
282 const VALUE *orig = rb_iseq_original_iseq(iseq);
283 const VALUE iseqw = rb_iseqw_new(iseq);
284 const VALUE inspection = rb_inspect(iseqw);
285 const char *stri = rb_str_to_cstr(inspection);
286 const VALUE disasm = rb_iseq_disasm(iseq);
287 const char *strd = rb_str_to_cstr(disasm);
288 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
289 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
290 const char *name = insn_name(insn);
291
292    /* rb_bug() is not capable of outputting contents this large. It
293       is designed to run from a SIGSEGV handler, which tends to be
294       very restricted. */
295    ruby_debug_printf(
296        "We are killing the stack canary set by %s, "
297        "at %s@pc=%"PRIdPTR"\n"
298        "watch out for the C stack trace.\n"
299 "%s",
300 name, stri, pos, strd);
301 rb_bug("see above.");
302}
303#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
304
305#else
306#define vm_check_canary(ec, sp)
307#define vm_check_frame(a, b, c, d)
308#endif /* VM_CHECK_MODE > 0 */
309
310#if USE_DEBUG_COUNTER
311static void
312vm_push_frame_debug_counter_inc(
313 const struct rb_execution_context_struct *ec,
314 const struct rb_control_frame_struct *reg_cfp,
315 VALUE type)
316{
317 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
318
319 RB_DEBUG_COUNTER_INC(frame_push);
320
321 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
322 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
323 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
324 if (prev) {
325 if (curr) {
326 RB_DEBUG_COUNTER_INC(frame_R2R);
327 }
328 else {
329 RB_DEBUG_COUNTER_INC(frame_R2C);
330 }
331 }
332 else {
333 if (curr) {
334 RB_DEBUG_COUNTER_INC(frame_C2R);
335 }
336 else {
337 RB_DEBUG_COUNTER_INC(frame_C2C);
338 }
339 }
340 }
341
342 switch (type & VM_FRAME_MAGIC_MASK) {
343 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
344 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
345 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
346 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
347 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
348 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
349 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
350 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
351 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
352 }
353
354 rb_bug("unreachable");
355}
356#else
357#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
358#endif
359
360// Return a poison value to be set above the stack top to verify leafness.
361VALUE
362rb_vm_stack_canary(void)
363{
364#if VM_CHECK_MODE > 0
365 return vm_stack_canary;
366#else
367 return 0;
368#endif
369}
370
371STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
372STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
373STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
374
375static void
376vm_push_frame(rb_execution_context_t *ec,
377 const rb_iseq_t *iseq,
378 VALUE type,
379 VALUE self,
380 VALUE specval,
381 VALUE cref_or_me,
382 const VALUE *pc,
383 VALUE *sp,
384 int local_size,
385 int stack_max)
386{
387 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
388
389 vm_check_frame(type, specval, cref_or_me, iseq);
390 VM_ASSERT(local_size >= 0);
391
392 /* check stack overflow */
393 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
394 vm_check_canary(ec, sp);
395
396 /* setup vm value stack */
397
398 /* initialize local variables */
399 for (int i=0; i < local_size; i++) {
400 *sp++ = Qnil;
401 }
402
403 /* setup ep with managing data */
404 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
405    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
406 *sp++ = type; /* ep[-0] / ENV_FLAGS */
407
408 /* setup new frame */
409 *cfp = (const struct rb_control_frame_struct) {
410 .pc = pc,
411 .sp = sp,
412 .iseq = iseq,
413 .self = self,
414 .ep = sp - 1,
415 .block_code = NULL,
416#if VM_DEBUG_BP_CHECK
417 .bp_check = sp,
418#endif
419 .jit_return = NULL
420 };
421
422 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
423 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
424 future/untested compilers/platforms. */
425
426 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
427 atomic_signal_fence(memory_order_seq_cst);
428 #endif
429
430 ec->cfp = cfp;
431
432 if (VMDEBUG == 2) {
433 SDR();
434 }
435 vm_push_frame_debug_counter_inc(ec, cfp, type);
436}
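/* Resulting layout, as implied by the code above (sketch, not from the
 * original source):
 *
 *     sp[-3]  cref_or_me  == ep[-2]  (VM_ENV_DATA_INDEX_ME_CREF)
 *     sp[-2]  specval     == ep[-1]  (VM_ENV_DATA_INDEX_SPECVAL)
 *     sp[-1]  type/flags  == ep[ 0]  (VM_ENV_DATA_INDEX_FLAGS)
 *
 * where sp is cfp->sp after the three pushes and cfp->ep == sp - 1, which is
 * exactly what the STATIC_ASSERTs on VM_ENV_DATA_INDEX_* above guarantee. */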
437
438void
439rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
440{
441 rb_control_frame_t *cfp = ec->cfp;
442
443 if (VMDEBUG == 2) SDR();
444
445 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
446}
447
448/* return TRUE if the frame is finished */
449static inline int
450vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
451{
452 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
453
454 if (VMDEBUG == 2) SDR();
455
456 RUBY_VM_CHECK_INTS(ec);
457 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
458
459 return flags & VM_FRAME_FLAG_FINISH;
460}
461
462void
463rb_vm_pop_frame(rb_execution_context_t *ec)
464{
465 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
466}
467
468// Pushes a dummy (pseudo) frame whose file name is fname.
469VALUE
470rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
471{
472 VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
473 void *ptr = ruby_xcalloc(sizeof(struct rb_iseq_constant_body) + sizeof(struct rb_iseq_struct), 1);
474 rb_imemo_tmpbuf_set_ptr(tmpbuf, ptr);
475
476 struct rb_iseq_struct *dmy_iseq = (struct rb_iseq_struct *)ptr;
477 struct rb_iseq_constant_body *dmy_body = (struct rb_iseq_constant_body *)&dmy_iseq[1];
478 dmy_iseq->body = dmy_body;
479 dmy_body->type = ISEQ_TYPE_TOP;
480 dmy_body->location.pathobj = fname;
481
482 vm_push_frame(ec,
483 dmy_iseq, //const rb_iseq_t *iseq,
484 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
485 ec->cfp->self, // VALUE self,
486 VM_BLOCK_HANDLER_NONE, // VALUE specval,
487 Qfalse, // VALUE cref_or_me,
488 NULL, // const VALUE *pc,
489 ec->cfp->sp, // VALUE *sp,
490 0, // int local_size,
491 0); // int stack_max
492
493 return tmpbuf;
494}
495
496/* method dispatch */
497static inline VALUE
498rb_arity_error_new(int argc, int min, int max)
499{
500 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
501 if (min == max) {
502 /* max is not needed */
503 }
504 else if (max == UNLIMITED_ARGUMENTS) {
505 rb_str_cat_cstr(err_mess, "+");
506 }
507 else {
508 rb_str_catf(err_mess, "..%d", max);
509 }
510 rb_str_cat_cstr(err_mess, ")");
511 return rb_exc_new3(rb_eArgError, err_mess);
512}
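/* Illustrative examples of the messages built above:
 *   rb_arity_error_new(2, 1, 1)  => "wrong number of arguments (given 2, expected 1)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS)
 *                                => "wrong number of arguments (given 0, expected 1+)"
 *   rb_arity_error_new(5, 1, 3)  => "wrong number of arguments (given 5, expected 1..3)"
 */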
513
514void
515rb_error_arity(int argc, int min, int max)
516{
517 rb_exc_raise(rb_arity_error_new(argc, min, max));
518}
519
520/* lvar */
521
522NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
523
524static void
525vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
526{
527    /* forcibly remember the env object for the GC write barrier */
528 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
529 VM_FORCE_WRITE(&ep[index], v);
530 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
531 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
532}
533
534// YJIT assumes this function never runs GC
535static inline void
536vm_env_write(const VALUE *ep, int index, VALUE v)
537{
538 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
539 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
540 VM_STACK_ENV_WRITE(ep, index, v);
541 }
542 else {
543 vm_env_write_slowpath(ep, index, v);
544 }
545}
546
547void
548rb_vm_env_write(const VALUE *ep, int index, VALUE v)
549{
550 vm_env_write(ep, index, v);
551}
552
553VALUE
554rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
555{
556 if (block_handler == VM_BLOCK_HANDLER_NONE) {
557 return Qnil;
558 }
559 else {
560 switch (vm_block_handler_type(block_handler)) {
561 case block_handler_type_iseq:
562 case block_handler_type_ifunc:
563 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
564 case block_handler_type_symbol:
565 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
566 case block_handler_type_proc:
567 return VM_BH_TO_PROC(block_handler);
568 default:
569 VM_UNREACHABLE(rb_vm_bh_to_procval);
570 }
571 }
572}
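/* Illustrative mapping (not from the original source): a literal block at
 * the call site (iseq) and a C-level block (ifunc) are materialized with
 * rb_vm_make_proc; `&:sym` goes through rb_sym_to_proc; an already-captured
 * Proc passed as `&pr` is returned as is; a call without a block maps to
 * VM_BLOCK_HANDLER_NONE and yields nil. */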
573
574/* svar */
575
576#if VM_CHECK_MODE > 0
577static int
578vm_svar_valid_p(VALUE svar)
579{
580 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
581 switch (imemo_type(svar)) {
582 case imemo_svar:
583 case imemo_cref:
584 case imemo_ment:
585 return TRUE;
586 default:
587 break;
588 }
589 }
590 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
591 return FALSE;
592}
593#endif
594
595static inline struct vm_svar *
596lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
597{
598 VALUE svar;
599
600 if (lep && (ec == NULL || ec->root_lep != lep)) {
601 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
602 }
603 else {
604 svar = ec->root_svar;
605 }
606
607 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
608
609 return (struct vm_svar *)svar;
610}
611
612static inline void
613lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
614{
615 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
616
617 if (lep && (ec == NULL || ec->root_lep != lep)) {
618 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
619 }
620 else {
621 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
622 }
623}
624
625static VALUE
626lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
627{
628 const struct vm_svar *svar = lep_svar(ec, lep);
629
630 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
631
632 switch (key) {
633 case VM_SVAR_LASTLINE:
634 return svar->lastline;
635 case VM_SVAR_BACKREF:
636 return svar->backref;
637 default: {
638 const VALUE ary = svar->others;
639
640 if (NIL_P(ary)) {
641 return Qnil;
642 }
643 else {
644 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
645 }
646 }
647 }
648}
649
650static struct vm_svar *
651svar_new(VALUE obj)
652{
653 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
654 *((VALUE *)&svar->lastline) = Qnil;
655 *((VALUE *)&svar->backref) = Qnil;
656 *((VALUE *)&svar->others) = Qnil;
657
658 return svar;
659}
660
661static void
662lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
663{
664 struct vm_svar *svar = lep_svar(ec, lep);
665
666 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
667 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
668 }
669
670 switch (key) {
671 case VM_SVAR_LASTLINE:
672 RB_OBJ_WRITE(svar, &svar->lastline, val);
673 return;
674 case VM_SVAR_BACKREF:
675 RB_OBJ_WRITE(svar, &svar->backref, val);
676 return;
677 default: {
678 VALUE ary = svar->others;
679
680 if (NIL_P(ary)) {
681 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
682 }
683 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
684 }
685 }
686}
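/* Sketch of the key space (an assumption based on vm_core.h naming, not
 * stated in this file): VM_SVAR_LASTLINE backs `$_`, VM_SVAR_BACKREF backs
 * `$~`, and any key >= VM_SVAR_EXTRA_START indexes into the lazily-allocated
 * `others` array (used, e.g., for flip-flop state). */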
687
688static inline VALUE
689vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
690{
691 VALUE val;
692
693 if (type == 0) {
694 val = lep_svar_get(ec, lep, key);
695 }
696 else {
697 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
698
699 if (type & 0x01) {
700 switch (type >> 1) {
701 case '&':
702 val = rb_reg_last_match(backref);
703 break;
704 case '`':
705 val = rb_reg_match_pre(backref);
706 break;
707 case '\'':
708 val = rb_reg_match_post(backref);
709 break;
710 case '+':
711 val = rb_reg_match_last(backref);
712 break;
713 default:
714 rb_bug("unexpected back-ref");
715 }
716 }
717 else {
718 val = rb_reg_nth_match((int)(type >> 1), backref);
719 }
720 }
721 return val;
722}
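/* Decoding of `type`, as the branches above imply (illustrative):
 *   type == 0          -> plain svar read keyed by `key` ($_, $~, ...)
 *   type odd           -> (type >> 1) is a character naming the special
 *                         backref: '&' ($&), '`' ($`), '\'' ($'), '+' ($+)
 *   type even, nonzero -> (type >> 1) is n for the numbered backref $n,
 *                         e.g. $2 arrives encoded as (2 << 1)
 */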
723
724static inline VALUE
725vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
726{
727 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
728 int nth = 0;
729
730 if (type & 0x01) {
731 switch (type >> 1) {
732 case '&':
733 case '`':
734 case '\'':
735 break;
736 case '+':
737 return rb_reg_last_defined(backref);
738 default:
739 rb_bug("unexpected back-ref");
740 }
741 }
742 else {
743 nth = (int)(type >> 1);
744 }
745 return rb_reg_nth_defined(nth, backref);
746}
747
748PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
749static rb_callable_method_entry_t *
750check_method_entry(VALUE obj, int can_be_svar)
751{
752 if (obj == Qfalse) return NULL;
753
754#if VM_CHECK_MODE > 0
755 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
756#endif
757
758 switch (imemo_type(obj)) {
759 case imemo_ment:
760 return (rb_callable_method_entry_t *)obj;
761 case imemo_cref:
762 return NULL;
763 case imemo_svar:
764 if (can_be_svar) {
765 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
766 }
767 default:
768#if VM_CHECK_MODE > 0
769 rb_bug("check_method_entry: svar should not be there:");
770#endif
771 return NULL;
772 }
773}
774
775const rb_callable_method_entry_t *
776rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
777{
778 const VALUE *ep = cfp->ep;
779    const rb_callable_method_entry_t *me;
780
781 while (!VM_ENV_LOCAL_P(ep)) {
782 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
783 ep = VM_ENV_PREV_EP(ep);
784 }
785
786 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
787}
788
789static const rb_iseq_t *
790method_entry_iseqptr(const rb_callable_method_entry_t *me)
791{
792 switch (me->def->type) {
793 case VM_METHOD_TYPE_ISEQ:
794 return me->def->body.iseq.iseqptr;
795 default:
796 return NULL;
797 }
798}
799
800static rb_cref_t *
801method_entry_cref(const rb_callable_method_entry_t *me)
802{
803 switch (me->def->type) {
804 case VM_METHOD_TYPE_ISEQ:
805 return me->def->body.iseq.cref;
806 default:
807 return NULL;
808 }
809}
810
811#if VM_CHECK_MODE == 0
812PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
813#endif
814static rb_cref_t *
815check_cref(VALUE obj, int can_be_svar)
816{
817 if (obj == Qfalse) return NULL;
818
819#if VM_CHECK_MODE > 0
820 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
821#endif
822
823 switch (imemo_type(obj)) {
824 case imemo_ment:
825 return method_entry_cref((rb_callable_method_entry_t *)obj);
826 case imemo_cref:
827 return (rb_cref_t *)obj;
828 case imemo_svar:
829 if (can_be_svar) {
830 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
831 }
832 default:
833#if VM_CHECK_MODE > 0
834 rb_bug("check_method_entry: svar should not be there:");
835#endif
836 return NULL;
837 }
838}
839
840static inline rb_cref_t *
841vm_env_cref(const VALUE *ep)
842{
843 rb_cref_t *cref;
844
845 while (!VM_ENV_LOCAL_P(ep)) {
846 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
847 ep = VM_ENV_PREV_EP(ep);
848 }
849
850 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
851}
852
853static int
854is_cref(const VALUE v, int can_be_svar)
855{
856 if (RB_TYPE_P(v, T_IMEMO)) {
857 switch (imemo_type(v)) {
858 case imemo_cref:
859 return TRUE;
860 case imemo_svar:
861 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
862 default:
863 break;
864 }
865 }
866 return FALSE;
867}
868
869static int
870vm_env_cref_by_cref(const VALUE *ep)
871{
872 while (!VM_ENV_LOCAL_P(ep)) {
873 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
874 ep = VM_ENV_PREV_EP(ep);
875 }
876 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
877}
878
879static rb_cref_t *
880cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
881{
882 const VALUE v = *vptr;
883 rb_cref_t *cref, *new_cref;
884
885 if (RB_TYPE_P(v, T_IMEMO)) {
886 switch (imemo_type(v)) {
887 case imemo_cref:
888 cref = (rb_cref_t *)v;
889 new_cref = vm_cref_dup(cref);
890 if (parent) {
891 RB_OBJ_WRITE(parent, vptr, new_cref);
892 }
893 else {
894 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
895 }
896 return (rb_cref_t *)new_cref;
897 case imemo_svar:
898 if (can_be_svar) {
899 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
900 }
901 /* fall through */
902 case imemo_ment:
903 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
904 default:
905 break;
906 }
907 }
908 return NULL;
909}
910
911static rb_cref_t *
912vm_cref_replace_with_duplicated_cref(const VALUE *ep)
913{
914 if (vm_env_cref_by_cref(ep)) {
915 rb_cref_t *cref;
916 VALUE envval;
917
918 while (!VM_ENV_LOCAL_P(ep)) {
919 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
920 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
921 return cref;
922 }
923 ep = VM_ENV_PREV_EP(ep);
924 }
925 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
926 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
927 }
928 else {
929 rb_bug("vm_cref_dup: unreachable");
930 }
931}
932
933static rb_cref_t *
934vm_get_cref(const VALUE *ep)
935{
936 rb_cref_t *cref = vm_env_cref(ep);
937
938 if (cref != NULL) {
939 return cref;
940 }
941 else {
942 rb_bug("vm_get_cref: unreachable");
943 }
944}
945
946rb_cref_t *
947rb_vm_get_cref(const VALUE *ep)
948{
949 return vm_get_cref(ep);
950}
951
952static rb_cref_t *
953vm_ec_cref(const rb_execution_context_t *ec)
954{
955 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
956
957 if (cfp == NULL) {
958 return NULL;
959 }
960 return vm_get_cref(cfp->ep);
961}
962
963static const rb_cref_t *
964vm_get_const_key_cref(const VALUE *ep)
965{
966 const rb_cref_t *cref = vm_get_cref(ep);
967 const rb_cref_t *key_cref = cref;
968
969 while (cref) {
970 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
971 RCLASS_EXT(CREF_CLASS(cref))->cloned) {
972 return key_cref;
973 }
974 cref = CREF_NEXT(cref);
975 }
976
977 /* does not include singleton class */
978 return NULL;
979}
980
981void
982rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
983{
984 rb_cref_t *new_cref;
985
986 while (cref) {
987 if (CREF_CLASS(cref) == old_klass) {
988 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
989 *new_cref_ptr = new_cref;
990 return;
991 }
992 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
993 cref = CREF_NEXT(cref);
994 *new_cref_ptr = new_cref;
995 new_cref_ptr = &new_cref->next;
996 }
997 *new_cref_ptr = NULL;
998}
999
1000static rb_cref_t *
1001vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1002{
1003 rb_cref_t *prev_cref = NULL;
1004
1005 if (ep) {
1006 prev_cref = vm_env_cref(ep);
1007 }
1008 else {
1009 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1010
1011 if (cfp) {
1012 prev_cref = vm_env_cref(cfp->ep);
1013 }
1014 }
1015
1016 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1017}
1018
1019static inline VALUE
1020vm_get_cbase(const VALUE *ep)
1021{
1022 const rb_cref_t *cref = vm_get_cref(ep);
1023
1024 return CREF_CLASS_FOR_DEFINITION(cref);
1025}
1026
1027static inline VALUE
1028vm_get_const_base(const VALUE *ep)
1029{
1030 const rb_cref_t *cref = vm_get_cref(ep);
1031
1032 while (cref) {
1033 if (!CREF_PUSHED_BY_EVAL(cref)) {
1034 return CREF_CLASS_FOR_DEFINITION(cref);
1035 }
1036 cref = CREF_NEXT(cref);
1037 }
1038
1039 return Qundef;
1040}
1041
1042static inline void
1043vm_check_if_namespace(VALUE klass)
1044{
1045 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1046 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1047 }
1048}
1049
1050static inline void
1051vm_ensure_not_refinement_module(VALUE self)
1052{
1053 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1054 rb_warn("not defined at the refinement, but at the outer class/module");
1055 }
1056}
1057
1058static inline VALUE
1059vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1060{
1061 return klass;
1062}
1063
1064static inline VALUE
1065vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1066{
1067 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1068 VALUE val;
1069
1070 if (NIL_P(orig_klass) && allow_nil) {
1071 /* in current lexical scope */
1072 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1073 const rb_cref_t *cref;
1074 VALUE klass = Qnil;
1075
1076 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1077 root_cref = CREF_NEXT(root_cref);
1078 }
1079 cref = root_cref;
1080 while (cref && CREF_NEXT(cref)) {
1081 if (CREF_PUSHED_BY_EVAL(cref)) {
1082 klass = Qnil;
1083 }
1084 else {
1085 klass = CREF_CLASS(cref);
1086 }
1087 cref = CREF_NEXT(cref);
1088
1089 if (!NIL_P(klass)) {
1090 VALUE av, am = 0;
1091 rb_const_entry_t *ce;
1092 search_continue:
1093 if ((ce = rb_const_lookup(klass, id))) {
1094 rb_const_warn_if_deprecated(ce, klass, id);
1095 val = ce->value;
1096 if (UNDEF_P(val)) {
1097 if (am == klass) break;
1098 am = klass;
1099 if (is_defined) return 1;
1100 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1101 rb_autoload_load(klass, id);
1102 goto search_continue;
1103 }
1104 else {
1105 if (is_defined) {
1106 return 1;
1107 }
1108 else {
1109 if (UNLIKELY(!rb_ractor_main_p())) {
1110 if (!rb_ractor_shareable_p(val)) {
1111 rb_raise(rb_eRactorIsolationError,
1112 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1113 }
1114 }
1115 return val;
1116 }
1117 }
1118 }
1119 }
1120 }
1121
1122 /* search self */
1123 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1124 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1125 }
1126 else {
1127 klass = CLASS_OF(ec->cfp->self);
1128 }
1129
1130 if (is_defined) {
1131 return rb_const_defined(klass, id);
1132 }
1133 else {
1134 return rb_const_get(klass, id);
1135 }
1136 }
1137 else {
1138 vm_check_if_namespace(orig_klass);
1139 if (is_defined) {
1140 return rb_public_const_defined_from(orig_klass, id);
1141 }
1142 else {
1143 return rb_public_const_get_from(orig_klass, id);
1144 }
1145 }
1146}
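/* Lookup order for a bare `FOO` (nil cbase), as implemented above (sketch):
 * walk the lexical cref chain from the innermost scope outward, skipping
 * scopes pushed by eval and triggering autoload where an entry is still a
 * placeholder; if nothing is found lexically, fall back to rb_const_get on
 * the innermost cref class, i.e. the ancestry of the enclosing
 * class/module. */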
1147
1148VALUE
1149rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1150{
1151 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1152}
1153
1154static inline VALUE
1155vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1156{
1157 VALUE val = Qnil;
1158 int idx = 0;
1159 int allow_nil = TRUE;
1160 if (segments[0] == idNULL) {
1161 val = rb_cObject;
1162 idx++;
1163 allow_nil = FALSE;
1164 }
1165 while (segments[idx]) {
1166 ID id = segments[idx++];
1167 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1168 allow_nil = FALSE;
1169 }
1170 return val;
1171}
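/* Illustrative encoding of the `segments` array (an assumption about the
 * compiler's output): `A::B::C` arrives as {idA, idB, idC, 0}, with a nil
 * starting point allowed only for the first segment, while `::A::B` carries
 * a leading idNULL so the walk is rooted at rb_cObject. The list is
 * 0-terminated, as the loop above relies on. */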
1172
1173
1174static inline VALUE
1175vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1176{
1177 VALUE klass;
1178
1179 if (!cref) {
1180 rb_bug("vm_get_cvar_base: no cref");
1181 }
1182
1183 while (CREF_NEXT(cref) &&
1184 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1185 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1186 cref = CREF_NEXT(cref);
1187 }
1188 if (top_level_raise && !CREF_NEXT(cref)) {
1189 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1190 }
1191
1192 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1193
1194 if (NIL_P(klass)) {
1195 rb_raise(rb_eTypeError, "no class variables available");
1196 }
1197 return klass;
1198}
1199
1200ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1201static inline void
1202fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1203{
1204 if (is_attr) {
1205 vm_cc_attr_index_set(cc, index, shape_id);
1206 }
1207 else {
1208 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1209 }
1210}
1211
1212#define ractor_incidental_shareable_p(cond, val) \
1213 (!(cond) || rb_ractor_shareable_p(val))
1214#define ractor_object_incidental_shareable_p(obj, val) \
1215 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1216
1217#define ATTR_INDEX_NOT_SET (attr_index_t)-1
1218
1219ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1220static inline VALUE
1221vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1222{
1223#if OPT_IC_FOR_IVAR
1224 VALUE val = Qundef;
1225 shape_id_t shape_id;
1226 VALUE * ivar_list;
1227
1228 if (SPECIAL_CONST_P(obj)) {
1229 return default_value;
1230 }
1231
1232#if SHAPE_IN_BASIC_FLAGS
1233 shape_id = RBASIC_SHAPE_ID(obj);
1234#endif
1235
1236 switch (BUILTIN_TYPE(obj)) {
1237 case T_OBJECT:
1238 ivar_list = ROBJECT_IVPTR(obj);
1239 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1240
1241#if !SHAPE_IN_BASIC_FLAGS
1242 shape_id = ROBJECT_SHAPE_ID(obj);
1243#endif
1244 break;
1245 case T_CLASS:
1246 case T_MODULE:
1247 {
1248 if (UNLIKELY(!rb_ractor_main_p())) {
1249 // For two reasons we can only use the fast path on the main
1250 // ractor.
1251 // First, only the main ractor is allowed to set ivars on classes
1252 // and modules. So we can skip locking.
1253 // Second, other ractors need to check the shareability of the
1254 // values returned from the class ivars.
1255
1256 if (default_value == Qundef) { // defined?
1257 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1258 }
1259 else {
1260 goto general_path;
1261 }
1262 }
1263
1264 ivar_list = RCLASS_IVPTR(obj);
1265
1266#if !SHAPE_IN_BASIC_FLAGS
1267 shape_id = RCLASS_SHAPE_ID(obj);
1268#endif
1269
1270 break;
1271 }
1272 default:
1273 if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1274 struct gen_ivtbl *ivtbl;
1275 rb_gen_ivtbl_get(obj, id, &ivtbl);
1276#if !SHAPE_IN_BASIC_FLAGS
1277 shape_id = ivtbl->shape_id;
1278#endif
1279 ivar_list = ivtbl->as.shape.ivptr;
1280 }
1281 else {
1282 return default_value;
1283 }
1284 }
1285
1286 shape_id_t cached_id;
1287 attr_index_t index;
1288
1289 if (is_attr) {
1290 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1291 }
1292 else {
1293 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1294 }
1295
1296 if (LIKELY(cached_id == shape_id)) {
1297 RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1298
1299 if (index == ATTR_INDEX_NOT_SET) {
1300 return default_value;
1301 }
1302
1303 val = ivar_list[index];
1304#if USE_DEBUG_COUNTER
1305 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1306
1307 if (RB_TYPE_P(obj, T_OBJECT)) {
1308 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1309 }
1310#endif
1311 RUBY_ASSERT(!UNDEF_P(val));
1312 }
1313 else { // cache miss case
1314#if USE_DEBUG_COUNTER
1315 if (is_attr) {
1316 if (cached_id != INVALID_SHAPE_ID) {
1317 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1318 }
1319 else {
1320 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1321 }
1322 }
1323 else {
1324 if (cached_id != INVALID_SHAPE_ID) {
1325 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1326 }
1327 else {
1328 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1329 }
1330 }
1331 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1332
1333 if (RB_TYPE_P(obj, T_OBJECT)) {
1334 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1335 }
1336#endif
1337
1338 if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
1339 st_table *table = NULL;
1340 switch (BUILTIN_TYPE(obj)) {
1341 case T_CLASS:
1342 case T_MODULE:
1343 table = (st_table *)RCLASS_IVPTR(obj);
1344 break;
1345
1346 case T_OBJECT:
1347 table = ROBJECT_IV_HASH(obj);
1348 break;
1349
1350 default: {
1351 struct gen_ivtbl *ivtbl;
1352 if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
1353 table = ivtbl->as.complex.table;
1354 }
1355 break;
1356 }
1357 }
1358
1359 if (!table || !st_lookup(table, id, &val)) {
1360 val = default_value;
1361 }
1362 }
1363 else {
1364 shape_id_t previous_cached_id = cached_id;
1365 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1366                // Refill the inline cache with the newly resolved
1367                // (shape_id, index) pair.
1368 if (cached_id != previous_cached_id) {
1369 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1370 }
1371
1372 if (index == ATTR_INDEX_NOT_SET) {
1373 val = default_value;
1374 }
1375 else {
1376 // We fetched the ivar list above
1377 val = ivar_list[index];
1378 RUBY_ASSERT(!UNDEF_P(val));
1379 }
1380 }
1381 else {
1382 if (is_attr) {
1383 vm_cc_attr_index_initialize(cc, shape_id);
1384 }
1385 else {
1386 vm_ic_attr_index_initialize(ic, shape_id);
1387 }
1388
1389 val = default_value;
1390 }
1391 }
1392
1393 }
1394
1395 if (!UNDEF_P(default_value)) {
1396 RUBY_ASSERT(!UNDEF_P(val));
1397 }
1398
1399 return val;
1400
1401general_path:
1402#endif /* OPT_IC_FOR_IVAR */
1403 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1404
1405 if (is_attr) {
1406 return rb_attr_get(obj, id);
1407 }
1408 else {
1409 return rb_ivar_get(obj, id);
1410 }
1411}
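/* The shape-based inline cache in a nutshell, per the code above (sketch):
 * the cache stores a (shape_id, attr_index) pair. If the receiver's current
 * shape_id matches the cached one, the ivar is read straight out of
 * ivar_list[index]; otherwise the index is re-resolved via
 * rb_shape_get_iv_index_with_hint and the cache refilled, with
 * OBJ_TOO_COMPLEX_SHAPE_ID objects falling back to an st_table lookup. */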
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
1427ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430
1431static VALUE
1432vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1433{
1434#if OPT_IC_FOR_IVAR
1435 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1436
1437 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1438 rb_check_frozen(obj);
1439
1440 attr_index_t index = rb_obj_ivar_set(obj, id, val);
1441
1442 shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
1443
1444 if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
1445 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1446 }
1447
1448 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1449 return val;
1450 }
1451#endif
1452 return rb_ivar_set(obj, id, val);
1453}
1454
1455static VALUE
1456vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1457{
1458 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1459}
1460
1461static VALUE
1462vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1463{
1464 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1465}
1466
1467NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1468static VALUE
1469vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1470{
1471#if SHAPE_IN_BASIC_FLAGS
1472 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1473#else
1474 shape_id_t shape_id = rb_generic_shape_id(obj);
1475#endif
1476
1477 struct gen_ivtbl *ivtbl = 0;
1478
1479 // Cache hit case
1480 if (shape_id == dest_shape_id) {
1481 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1482 }
1483 else if (dest_shape_id != INVALID_SHAPE_ID) {
1484 rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1485 rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1486
1487 if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1488 RUBY_ASSERT(index < dest_shape->capacity);
1489 }
1490 else {
1491 return Qundef;
1492 }
1493 }
1494 else {
1495 return Qundef;
1496 }
1497
1498 rb_gen_ivtbl_get(obj, 0, &ivtbl);
1499
1500 if (shape_id != dest_shape_id) {
1501#if SHAPE_IN_BASIC_FLAGS
1502 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1503#else
1504 ivtbl->shape_id = dest_shape_id;
1505#endif
1506 }
1507
1508 RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);
1509
1510 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1511
1512 return val;
1513}
1514
1515static inline VALUE
1516vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1517{
1518#if OPT_IC_FOR_IVAR
1519 switch (BUILTIN_TYPE(obj)) {
1520 case T_OBJECT:
1521 {
1522 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1523
1524 shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
1525 RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1526
1527 if (LIKELY(shape_id == dest_shape_id)) {
1528 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1529 VM_ASSERT(!rb_ractor_shareable_p(obj));
1530 }
1531 else if (dest_shape_id != INVALID_SHAPE_ID) {
1532 rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1533 rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1534 shape_id_t source_shape_id = dest_shape->parent_id;
1535
1536 if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1537 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1538
1539 ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
1540
1541 RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
1542 RUBY_ASSERT(index < dest_shape->capacity);
1543 }
1544 else {
1545 break;
1546 }
1547 }
1548 else {
1549 break;
1550 }
1551
1552 VALUE *ptr = ROBJECT_IVPTR(obj);
1553
1554 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
1555 RB_OBJ_WRITE(obj, &ptr[index], val);
1556
1557 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1558 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1559 return val;
1560 }
1561 break;
1562 case T_CLASS:
1563 case T_MODULE:
1564 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1565 default:
1566 break;
1567 }
1568
1569 return Qundef;
1570#endif /* OPT_IC_FOR_IVAR */
1571}
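/* The cached pair is interpreted as a transition, per the branches above
 * (sketch): shape_id == dest_shape_id means the ivar already exists and is
 * overwritten in place; shape_id == dest_shape->parent_id with a matching
 * edge_name and capacity means this write adds the ivar and moves the object
 * to dest_shape_id; anything else punts to the slow path by returning
 * Qundef. */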
1572
1573static VALUE
1574update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1575{
1576 VALUE defined_class = 0;
1577 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1578
1579 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1580 defined_class = RBASIC(defined_class)->klass;
1581 }
1582
1583 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1584 if (!rb_cvc_tbl) {
1585 rb_bug("the cvc table should be set");
1586 }
1587
1588 VALUE ent_data;
1589 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1590 rb_bug("should have cvar cache entry");
1591 }
1592
1593 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1594
1595 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1596 ent->cref = cref;
1597 ic->entry = ent;
1598
1599 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1600 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1601 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1602 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1603
1604 return cvar_value;
1605}
1606
1607static inline VALUE
1608vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1609{
1610 const rb_cref_t *cref;
1611 cref = vm_get_cref(GET_EP());
1612
1613 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1614 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1615
1616 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1617 RUBY_ASSERT(!UNDEF_P(v));
1618
1619 return v;
1620 }
1621
1622 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1623
1624 return update_classvariable_cache(iseq, klass, id, cref, ic);
1625}
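/* The inline cache above stays valid only while all three conditions hold
 * (sketch): no class variable anywhere has been written since the entry was
 * filled (global_cvar_state unchanged), the reader is in the same lexical
 * scope (same cref), and we are on the main ractor. Any mismatch re-resolves
 * via vm_get_cvar_base and refills the cache. */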
1626
1627VALUE
1628rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1629{
1630 return vm_getclassvariable(iseq, cfp, id, ic);
1631}
1632
1633static inline void
1634vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1635{
1636 const rb_cref_t *cref;
1637 cref = vm_get_cref(GET_EP());
1638
1639 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1640 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1641
1642 rb_class_ivar_set(ic->entry->class_value, id, val);
1643 return;
1644 }
1645
1646 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1647
1648 rb_cvar_set(klass, id, val);
1649
1650 update_classvariable_cache(iseq, klass, id, cref, ic);
1651}
1652
1653void
1654rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1655{
1656 vm_setclassvariable(iseq, cfp, id, val, ic);
1657}
1658
1659static inline VALUE
1660vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1661{
1662 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1663}
1664
1665static inline void
1666vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1667{
1668 if (RB_SPECIAL_CONST_P(obj)) {
1669        rb_error_frozen_object(obj);
1670        return;
1671 }
1672
1673 shape_id_t dest_shape_id;
1674 attr_index_t index;
1675 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1676
1677 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1678 switch (BUILTIN_TYPE(obj)) {
1679 case T_OBJECT:
1680 case T_CLASS:
1681 case T_MODULE:
1682 break;
1683 default:
1684 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1685 return;
1686 }
1687 }
1688 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1689 }
1690}
1691
1692void
1693rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1694{
1695 vm_setinstancevariable(iseq, obj, id, val, ic);
1696}
1697
1698static VALUE
1699vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1700{
1701 /* continue throw */
1702
1703 if (FIXNUM_P(err)) {
1704 ec->tag->state = RUBY_TAG_FATAL;
1705 }
1706 else if (SYMBOL_P(err)) {
1707 ec->tag->state = TAG_THROW;
1708 }
1709 else if (THROW_DATA_P(err)) {
1710 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1711 }
1712 else {
1713 ec->tag->state = TAG_RAISE;
1714 }
1715 return err;
1716}
1717
1718static VALUE
1719vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1720 const int flag, const VALUE throwobj)
1721{
1722 const rb_control_frame_t *escape_cfp = NULL;
1723 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1724
1725 if (flag != 0) {
1726 /* do nothing */
1727 }
1728 else if (state == TAG_BREAK) {
1729 int is_orphan = 1;
1730 const VALUE *ep = GET_EP();
1731 const rb_iseq_t *base_iseq = GET_ISEQ();
1732 escape_cfp = reg_cfp;
1733
1734 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1735 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1736 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1737 ep = escape_cfp->ep;
1738 base_iseq = escape_cfp->iseq;
1739 }
1740 else {
1741 ep = VM_ENV_PREV_EP(ep);
1742 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1743 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1744 VM_ASSERT(escape_cfp->iseq == base_iseq);
1745 }
1746 }
1747
1748 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1749 /* lambda{... break ...} */
1750 is_orphan = 0;
1751 state = TAG_RETURN;
1752 }
1753 else {
1754 ep = VM_ENV_PREV_EP(ep);
1755
1756 while (escape_cfp < eocfp) {
1757 if (escape_cfp->ep == ep) {
1758 const rb_iseq_t *const iseq = escape_cfp->iseq;
1759 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1760 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1761 unsigned int i;
1762
1763 if (!ct) break;
1764 for (i=0; i < ct->size; i++) {
1765 const struct iseq_catch_table_entry *const entry =
1766 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1767
1768 if (entry->type == CATCH_TYPE_BREAK &&
1769 entry->iseq == base_iseq &&
1770 entry->start < epc && entry->end >= epc) {
1771 if (entry->cont == epc) { /* found! */
1772 is_orphan = 0;
1773 }
1774 break;
1775 }
1776 }
1777 break;
1778 }
1779
1780 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1781 }
1782 }
1783
1784 if (is_orphan) {
1785 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1786 }
1787 }
1788 else if (state == TAG_RETRY) {
1789 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1790
1791 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1792 }
1793 else if (state == TAG_RETURN) {
1794 const VALUE *current_ep = GET_EP();
1795 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1796 int in_class_frame = 0;
1797 int toplevel = 1;
1798 escape_cfp = reg_cfp;
1799
1800 // find target_lep, target_ep
1801 while (!VM_ENV_LOCAL_P(ep)) {
1802 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1803 target_ep = ep;
1804 }
1805 ep = VM_ENV_PREV_EP(ep);
1806 }
1807 target_lep = ep;
1808
1809 while (escape_cfp < eocfp) {
1810 const VALUE *lep = VM_CF_LEP(escape_cfp);
1811
1812 if (!target_lep) {
1813 target_lep = lep;
1814 }
1815
1816 if (lep == target_lep &&
1817 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1818 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1819 in_class_frame = 1;
1820 target_lep = 0;
1821 }
1822
1823 if (lep == target_lep) {
1824 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1825 toplevel = 0;
1826 if (in_class_frame) {
1827 /* lambda {class A; ... return ...; end} */
1828 goto valid_return;
1829 }
1830 else {
1831 const VALUE *tep = current_ep;
1832
1833 while (target_lep != tep) {
1834 if (escape_cfp->ep == tep) {
1835 /* in lambda */
1836 if (tep == target_ep) {
1837 goto valid_return;
1838 }
1839 else {
1840 goto unexpected_return;
1841 }
1842 }
1843 tep = VM_ENV_PREV_EP(tep);
1844 }
1845 }
1846 }
1847 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1848 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1849 case ISEQ_TYPE_TOP:
1850 case ISEQ_TYPE_MAIN:
1851 if (toplevel) {
1852 if (in_class_frame) goto unexpected_return;
1853 if (target_ep == NULL) {
1854 goto valid_return;
1855 }
1856 else {
1857 goto unexpected_return;
1858 }
1859 }
1860 break;
1861 case ISEQ_TYPE_EVAL: {
1862 const rb_iseq_t *is = escape_cfp->iseq;
1863 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1864 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1865 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1866 t = ISEQ_BODY(is)->type;
1867 }
1868 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1869 break;
1870 }
1871 case ISEQ_TYPE_CLASS:
1872 toplevel = 0;
1873 break;
1874 default:
1875 break;
1876 }
1877 }
1878 }
1879
1880 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1881 if (target_ep == NULL) {
1882 goto valid_return;
1883 }
1884 else {
1885 goto unexpected_return;
1886 }
1887 }
1888
1889 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1890 }
1891 unexpected_return:;
1892 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1893
1894 valid_return:;
1895 /* do nothing */
1896 }
1897 else {
1898        rb_bug("insns(throw): unsupported throw type");
1899 }
1900
1901 ec->tag->state = state;
1902 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1903}
1904
1905static VALUE
1906vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1907 rb_num_t throw_state, VALUE throwobj)
1908{
1909 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1910 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1911
1912 if (state != 0) {
1913 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1914 }
1915 else {
1916 return vm_throw_continue(ec, throwobj);
1917 }
1918}
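/* Encoding of `throw_state`, as decoded above (sketch): the low bits
 * (VM_THROW_STATE_MASK) carry the tag -- TAG_BREAK, TAG_RETRY, TAG_RETURN,
 * etc. -- and VM_THROW_NO_ESCAPE_FLAG marks throws that need no frame
 * search. A state of 0 means "continue an in-flight throw" and is handled by
 * vm_throw_continue. */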
1919
1920VALUE
1921rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1922{
1923 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1924}
1925
1926static inline void
1927vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1928{
1929 int is_splat = flag & 0x01;
1930 const VALUE *ptr;
1931 rb_num_t len;
1932 const VALUE obj = ary;
1933
1934 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1935 ary = obj;
1936 ptr = &ary;
1937 len = 1;
1938 }
1939 else {
1940 ptr = RARRAY_CONST_PTR(ary);
1941 len = (rb_num_t)RARRAY_LEN(ary);
1942 }
1943
1944 if (num + is_splat == 0) {
1945 /* no space left on stack */
1946 }
1947 else if (flag & 0x02) {
1948        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1949 rb_num_t i = 0, j;
1950
1951 if (len < num) {
1952 for (i = 0; i < num - len; i++) {
1953 *cfp->sp++ = Qnil;
1954 }
1955 }
1956
1957 for (j = 0; i < num; i++, j++) {
1958 VALUE v = ptr[len - j - 1];
1959 *cfp->sp++ = v;
1960 }
1961
1962 if (is_splat) {
1963 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1964 }
1965 }
1966 else {
1967 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1968 if (is_splat) {
1969 if (num > len) {
1970 *cfp->sp++ = rb_ary_new();
1971 }
1972 else {
1973 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1974 }
1975 }
1976
1977 if (num > len) {
1978 rb_num_t i = 0;
1979 for (; i < num - len; i++) {
1980 *cfp->sp++ = Qnil;
1981 }
1982
1983 for (rb_num_t j = 0; i < num; i++, j++) {
1984 *cfp->sp++ = ptr[len - j - 1];
1985 }
1986 }
1987 else {
1988 for (rb_num_t j = 0; j < num; j++) {
1989 *cfp->sp++ = ptr[num - j - 1];
1990 }
1991 }
1992 }
1993
1994 RB_GC_GUARD(ary);
1995}
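/* Illustrative cases (not from the original source; the Ruby snippets are
 * assumed compilations), with ary = [1, 2, 3]:
 *   num=2, flag=0  (a, b = ary)   pushes 2 then 1, leaving 1 on top
 *   num=1, flag=1  (a, *b = ary)  pushes [2, 3], then 1
 *   num=1, flag=3  (*a, b = ary)  post mode: pushes 3 from the tail,
 *                                 then the splat [1, 2]
 * Bit 0 of `flag` requests a trailing splat array; bit 1 selects post mode. */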
1996
1997static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1998
1999static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2000
2001static struct rb_class_cc_entries *
2002vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2003{
2004 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
2005#if VM_CHECK_MODE > 0
2006 ccs->debug_sig = ~(VALUE)ccs;
2007#endif
2008 ccs->capa = 0;
2009 ccs->len = 0;
2010 ccs->cme = cme;
2011 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2012 ccs->entries = NULL;
2013
2014 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2015 RB_OBJ_WRITTEN(klass, Qundef, cme);
2016 return ccs;
2017}
2018
2019static void
2020vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2021{
2022 if (! vm_cc_markable(cc)) {
2023 return;
2024 }
2025
2026 if (UNLIKELY(ccs->len == ccs->capa)) {
2027 if (ccs->capa == 0) {
2028 ccs->capa = 1;
2029 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2030 }
2031 else {
2032 ccs->capa *= 2;
2033 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2034 }
2035 }
2036 VM_ASSERT(ccs->len < ccs->capa);
2037
2038 const int pos = ccs->len++;
2039 ccs->entries[pos].argc = vm_ci_argc(ci);
2040 ccs->entries[pos].flag = vm_ci_flag(ci);
2041 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2042
2043 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2044 // for tuning
2045 // vm_mtbl_dump(klass, 0);
2046 }
2047}
2048
2049#if VM_CHECK_MODE > 0
2050void
2051rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2052{
2053 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2054 for (int i=0; i<ccs->len; i++) {
2055 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2056 ccs->entries[i].flag,
2057 ccs->entries[i].argc);
2058 rp(ccs->entries[i].cc);
2059 }
2060}
2061
2062static int
2063vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2064{
2065 VM_ASSERT(vm_ccs_p(ccs));
2066 VM_ASSERT(ccs->len <= ccs->capa);
2067
2068 for (int i=0; i<ccs->len; i++) {
2069 const struct rb_callcache *cc = ccs->entries[i].cc;
2070
2071 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2072 VM_ASSERT(vm_cc_class_check(cc, klass));
2073 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2074 VM_ASSERT(!vm_cc_super_p(cc));
2075 VM_ASSERT(!vm_cc_refinement_p(cc));
2076 }
2077 return TRUE;
2078}
2079#endif
2080
2081const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2082
2083static const struct rb_callcache *
2084vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2085{
2086 const ID mid = vm_ci_mid(ci);
2087 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2088 struct rb_class_cc_entries *ccs = NULL;
2089 VALUE ccs_data;
2090
2091 if (cc_tbl) {
2092 // CCS data is keyed on method id, so we don't need the method id
2093 // for doing comparisons in the `for` loop below.
2094 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2095 ccs = (struct rb_class_cc_entries *)ccs_data;
2096 const int ccs_len = ccs->len;
2097
2098 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2099 rb_vm_ccs_free(ccs);
2100 rb_id_table_delete(cc_tbl, mid);
2101 ccs = NULL;
2102 }
2103 else {
2104 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2105
2106 // We already know the method id is correct because we had
2107 // to look up the ccs_data by method id. All we need to
2108 // compare is argc and flag
2109 unsigned int argc = vm_ci_argc(ci);
2110 unsigned int flag = vm_ci_flag(ci);
2111
2112 for (int i=0; i<ccs_len; i++) {
2113 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2114 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2115 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2116
2117 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2118
2119 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2120 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2121
2122 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2123 VM_ASSERT(ccs_cc->klass == klass);
2124 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2125
2126 return ccs_cc;
2127 }
2128 }
2129 }
2130 }
2131 }
2132 else {
2133 cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
2134 }
2135
2136 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2137
2138 const rb_callable_method_entry_t *cme;
2139
2140 if (ccs) {
2141 cme = ccs->cme;
2142 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2143
2144 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2145 }
2146 else {
2147 cme = rb_callable_method_entry(klass, mid);
2148 }
2149
2150 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2151
2152 if (cme == NULL) {
2153 // undef or not found: can't cache the information
2154 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2155 return &vm_empty_cc;
2156 }
2157
2158 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2159
2160 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2161
2162 if (ccs == NULL) {
2163 VM_ASSERT(cc_tbl != NULL);
2164
2165 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2166 // rb_callable_method_entry() prepares ccs.
2167 ccs = (struct rb_class_cc_entries *)ccs_data;
2168 }
2169 else {
2170 // TODO: required?
2171 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2172 }
2173 }
2174
2175 cme = rb_check_overloaded_cme(cme, ci);
2176
2177 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2178 vm_ccs_push(klass, ccs, ci, cc);
2179
2180 VM_ASSERT(vm_cc_cme(cc) != NULL);
2181 VM_ASSERT(cme->called_id == mid);
2182 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2183
2184 return cc;
2185}
2186
2187const struct rb_callcache *
2188rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2189{
2190 const struct rb_callcache *cc;
2191
2192 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2193
2194 RB_VM_LOCK_ENTER();
2195 {
2196 cc = vm_search_cc(klass, ci);
2197
2198 VM_ASSERT(cc);
2199 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2200 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2201 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2202 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2203 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2204 }
2205 RB_VM_LOCK_LEAVE();
2206
2207 return cc;
2208}
2209
2210static const struct rb_callcache *
2211vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2212{
2213#if USE_DEBUG_COUNTER
2214 const struct rb_callcache *old_cc = cd->cc;
2215#endif
2216
2217 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2218
2219#if OPT_INLINE_METHOD_CACHE
2220 cd->cc = cc;
2221
2222 const struct rb_callcache *empty_cc = &vm_empty_cc;
2223 if (cd_owner && cc != empty_cc) {
2224 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2225 }
2226
2227#if USE_DEBUG_COUNTER
2228 if (!old_cc || old_cc == empty_cc) {
2229 // empty
2230 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2231 }
2232 else if (old_cc == cc) {
2233 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2234 }
2235 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2236 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2237 }
2238 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2239 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2240 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2241 }
2242 else {
2243 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2244 }
2245#endif
2246#endif // OPT_INLINE_METHOD_CACHE
2247
2248 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2249 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2250
2251 return cc;
2252}
2253
2254ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2255static const struct rb_callcache *
2256vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2257{
2258 const struct rb_callcache *cc = cd->cc;
2259
2260#if OPT_INLINE_METHOD_CACHE
2261 if (LIKELY(vm_cc_class_check(cc, klass))) {
2262 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2263 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2264 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2265 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2266 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2267 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2268
2269 return cc;
2270 }
2271 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2272 }
2273 else {
2274 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2275 }
2276#endif
2277
2278 return vm_search_method_slowpath0(cd_owner, cd, klass);
2279}
2280
2281static const struct rb_callcache *
2282vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2283{
2284 VALUE klass = CLASS_OF(recv);
2285 VM_ASSERT(klass != Qfalse);
2286 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2287
2288 return vm_search_method_fastpath(cd_owner, cd, klass);
2289}
2290
2291#if __has_attribute(transparent_union)
2292typedef union {
2293 VALUE (*anyargs)(ANYARGS);
2294 VALUE (*f00)(VALUE);
2295 VALUE (*f01)(VALUE, VALUE);
2296 VALUE (*f02)(VALUE, VALUE, VALUE);
2297 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2298 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2299 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2304 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2305 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2306 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2307 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2308 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2309 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2310 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2311} __attribute__((__transparent_union__)) cfunc_type;
2312# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2313#else
2314typedef VALUE (*cfunc_type)(ANYARGS);
2315# define make_cfunc_type(f) (cfunc_type)(f)
2316#endif
2317
2318static inline int
2319check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2320{
2321 if (! me) {
2322 return false;
2323 }
2324 else {
2325 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2326 VM_ASSERT(callable_method_entry_p(me));
2327 VM_ASSERT(me->def);
2328 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2329 return false;
2330 }
2331 else {
2332#if __has_attribute(transparent_union)
2333 return me->def->body.cfunc.func == func.anyargs;
2334#else
2335 return me->def->body.cfunc.func == func;
2336#endif
2337 }
2338 }
2339}
2340
2341static inline int
2342check_method_basic_definition(const rb_callable_method_entry_t *me)
2343{
2344 return me && METHOD_ENTRY_BASIC(me);
2345}
2346
2347static inline int
2348vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2349{
2350 VM_ASSERT(iseq != NULL);
2351 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2352 return check_cfunc(vm_cc_cme(cc), func);
2353}
2354
2355#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2356#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2357
2358#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2359
2360static inline bool
2361FIXNUM_2_P(VALUE a, VALUE b)
2362{
2363 /* FIXNUM_P(a) && FIXNUM_P(b)
2364 * == ((a & 1) && (b & 1))
2365 * == a & b & 1 */
2366 SIGNED_VALUE x = a;
2367 SIGNED_VALUE y = b;
2368 SIGNED_VALUE z = x & y & 1;
2369 return z == 1;
2370}
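/* Worked example (informal): the Fixnums 0 and 1 are encoded as the
 * VALUEs 1 and 3, so 1 & 3 & 1 == 1 and FIXNUM_2_P reports true; for a
 * non-Fixnum operand such as the VALUE 4 (low tag bits 00),
 * 1 & 4 & 1 == 0, catching it without a second branch. */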
2371
2372static inline bool
2373FLONUM_2_P(VALUE a, VALUE b)
2374{
2375#if USE_FLONUM
2376 /* FLONUM_P(a) && FLONUM_P(b)
2377 * == ((a & 3) == 2) && ((b & 3) == 2)
2378 * == !(((a ^ 2) | (b ^ 2)) & 3)
2379 */
2380 SIGNED_VALUE x = a;
2381 SIGNED_VALUE y = b;
2382 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2383 return !z;
2384#else
2385 return false;
2386#endif
2387}
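/* Worked example (informal): flonums carry the tag 0b10 in their low
 * two bits. For a == 2 and b == 6, ((2 ^ 2) | (6 ^ 2)) & 3 == 0, so
 * both are flonums; any operand whose low two bits differ from 0b10
 * leaves a nonzero residue in the low two bits and the test fails. */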
2388
2389static VALUE
2390opt_equality_specialized(VALUE recv, VALUE obj)
2391{
2392 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2393 goto compare_by_identity;
2394 }
2395 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2396 goto compare_by_identity;
2397 }
2398 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2399 goto compare_by_identity;
2400 }
2401 else if (SPECIAL_CONST_P(recv)) {
2402 // other special constants: not specialized here; fall through to Qundef
2403 }
2404 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2405 double a = RFLOAT_VALUE(recv);
2406 double b = RFLOAT_VALUE(obj);
2407
2408#if MSC_VERSION_BEFORE(1300)
2409 if (isnan(a)) {
2410 return Qfalse;
2411 }
2412 else if (isnan(b)) {
2413 return Qfalse;
2414 }
2415 else
2416#endif
2417 return RBOOL(a == b);
2418 }
2419 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2420 if (recv == obj) {
2421 return Qtrue;
2422 }
2423 else if (RB_TYPE_P(obj, T_STRING)) {
2424 return rb_str_eql_internal(obj, recv);
2425 }
2426 }
2427 return Qundef;
2428
2429 compare_by_identity:
2430 return RBOOL(recv == obj);
2431}
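/* Informal summary: absent redefinitions, Fixnum/flonum/static-Symbol
 * pairs compare by identity, heap Float and String receivers take the
 * type-specific comparisons above, and everything else returns Qundef
 * so the caller falls back to a full method dispatch. */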
2432
2433static VALUE
2434opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2435{
2436 VM_ASSERT(cd_owner != NULL);
2437
2438 VALUE val = opt_equality_specialized(recv, obj);
2439 if (!UNDEF_P(val)) return val;
2440
2441 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2442 return Qundef;
2443 }
2444 else {
2445 return RBOOL(recv == obj);
2446 }
2447}
2448
2449#undef EQ_UNREDEFINED_P
2450
2451static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2452NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2453
2454static VALUE
2455opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2456{
2457 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2458
2459 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2460 return RBOOL(recv == obj);
2461 }
2462 else {
2463 return Qundef;
2464 }
2465}
2466
2467static VALUE
2468opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2469{
2470 VALUE val = opt_equality_specialized(recv, obj);
2471 if (!UNDEF_P(val)) {
2472 return val;
2473 }
2474 else {
2475 return opt_equality_by_mid_slowpath(recv, obj, mid);
2476 }
2477}
2478
2479VALUE
2480rb_equal_opt(VALUE obj1, VALUE obj2)
2481{
2482 return opt_equality_by_mid(obj1, obj2, idEq);
2483}
2484
2485VALUE
2486rb_eql_opt(VALUE obj1, VALUE obj2)
2487{
2488 return opt_equality_by_mid(obj1, obj2, idEqlP);
2489}
2490
2491extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2492extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2493
2494static VALUE
2495check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2496{
2497 switch (type) {
2498 case VM_CHECKMATCH_TYPE_WHEN:
2499 return pattern;
2500 case VM_CHECKMATCH_TYPE_RESCUE:
2501 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2502 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2503 }
2504 /* fall through */
2505 case VM_CHECKMATCH_TYPE_CASE: {
2506 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2507 }
2508 default:
2509 rb_bug("check_match: unreachable");
2510 }
2511}
2512
2513
2514#if MSC_VERSION_BEFORE(1300)
2515#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2516#else
2517#define CHECK_CMP_NAN(a, b) /* do nothing */
2518#endif
2519
2520static inline VALUE
2521double_cmp_lt(double a, double b)
2522{
2523 CHECK_CMP_NAN(a, b);
2524 return RBOOL(a < b);
2525}
2526
2527static inline VALUE
2528double_cmp_le(double a, double b)
2529{
2530 CHECK_CMP_NAN(a, b);
2531 return RBOOL(a <= b);
2532}
2533
2534static inline VALUE
2535double_cmp_gt(double a, double b)
2536{
2537 CHECK_CMP_NAN(a, b);
2538 return RBOOL(a > b);
2539}
2540
2541static inline VALUE
2542double_cmp_ge(double a, double b)
2543{
2544 CHECK_CMP_NAN(a, b);
2545 return RBOOL(a >= b);
2546}
2547
2548 // A copy of this logic is also used by vm_dump.c
2549static inline VALUE *
2550vm_base_ptr(const rb_control_frame_t *cfp)
2551{
2552 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2553
2554 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2555 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2556
2557 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2558 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2559 int params = ISEQ_BODY(cfp->iseq)->param.size;
2560
2561 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2562 bp += vm_ci_argc(ci);
2563 }
2564
2565 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2566 /* adjust `self' */
2567 bp += 1;
2568 }
2569#if VM_DEBUG_BP_CHECK
2570 if (bp != cfp->bp_check) {
2571 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2572 (long)(cfp->bp_check - GET_EC()->vm_stack),
2573 (long)(bp - GET_EC()->vm_stack));
2574 rb_bug("vm_base_ptr: unreachable");
2575 }
2576#endif
2577 return bp;
2578 }
2579 else {
2580 return NULL;
2581 }
2582}
2583
2584VALUE *
2585rb_vm_base_ptr(const rb_control_frame_t *cfp)
2586{
2587 return vm_base_ptr(cfp);
2588}
2589
2590/* method call processes with call_info */
2591
2592#include "vm_args.c"
2593
2594static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2595ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2596static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2597static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2598static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2599static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2600static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2601
2602static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2603
2604static VALUE
2605vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2606{
2607 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2608
2609 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2610}
2611
2612static VALUE
2613vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2614{
2615 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2616
2617 const struct rb_callcache *cc = calling->cc;
2618 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2619 int param = ISEQ_BODY(iseq)->param.size;
2620 int local = ISEQ_BODY(iseq)->local_table_size;
2621 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2622}
2623
2624bool
2625rb_simple_iseq_p(const rb_iseq_t *iseq)
2626{
2627 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2628 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2629 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2630 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2631 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2632 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2633 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2634 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2635}
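/* Informal examples: `def m(a, b); end` is simple (mandatory
 * positionals only); `def m(a = 1)`, `def m(*a)`, `def m(k: 1)`,
 * `def m(**kw)`, `def m(&b)` and `def m(...)` each set one of the
 * flags above, so they are not. */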
2636
2637bool
2638rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2639{
2640 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2641 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2642 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2643 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2644 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2645 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2646 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2647 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2648}
2649
2650bool
2651rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2652{
2653 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2654 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2655 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2656 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2657 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2658 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2659 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2660}
2661
2662#define ALLOW_HEAP_ARGV (-2)
2663#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2664
2665static inline bool
2666vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2667{
2668 vm_check_canary(GET_EC(), cfp->sp);
2669 bool ret = false;
2670
2671 if (!NIL_P(ary)) {
2672 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2673 long len = RARRAY_LEN(ary);
2674 int argc = calling->argc;
2675
2676 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2677 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2678 * a temporary array, instead of trying to keep the arguments on the VM stack.
2679 */
2680 VALUE *argv = cfp->sp - argc;
2681 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2682 rb_ary_cat(argv_ary, argv, argc);
2683 rb_ary_cat(argv_ary, ptr, len);
2684 cfp->sp -= argc - 1;
2685 cfp->sp[-1] = argv_ary;
2686 calling->argc = 1;
2687 calling->heap_argv = argv_ary;
2688 RB_GC_GUARD(ary);
2689 }
2690 else {
2691 long i;
2692
2693 if (max_args >= 0 && len + argc > max_args) {
2694 /* If only a given max_args is allowed, copy up to max args.
2695 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2696 * where additional arguments are ignored.
2697 *
2698 * Also, copy up to one more argument than the maximum,
2699 * in case it is an empty keyword hash that will be removed.
2700 */
2701 calling->argc += len - (max_args - argc + 1);
2702 len = max_args - argc + 1;
2703 ret = true;
2704 }
2705 else {
2706 /* Unset heap_argv if it was set originally. This can happen when
2707 * forwarding modified arguments, where heap_argv was used
2708 * originally but is not supported by the forwarded
2709 * method in all cases.
2710 */
2711 calling->heap_argv = 0;
2712 }
2713 CHECK_VM_STACK_OVERFLOW(cfp, len);
2714
2715 for (i = 0; i < len; i++) {
2716 *cfp->sp++ = ptr[i];
2717 }
2718 calling->argc += i;
2719 }
2720 }
2721
2722 return ret;
2723}
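/* Informal example: `f(*big)` where `big` has more than
 * VM_ARGC_STACK_MAX elements (and max_args allows heap argv) collects
 * everything into a hidden Array stored in a single stack slot
 * (calling->heap_argv), avoiding a SystemStackError from pushing each
 * element onto the VM stack. */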
2724
2725static inline void
2726vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2727{
2728 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2729 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2730 const VALUE h = rb_hash_new_with_size(kw_len);
2731 VALUE *sp = cfp->sp;
2732 int i;
2733
2734 for (i=0; i<kw_len; i++) {
2735 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2736 }
2737 (sp-kw_len)[0] = h;
2738
2739 cfp->sp -= kw_len - 1;
2740 calling->argc -= kw_len - 1;
2741 calling->kw_splat = 1;
2742}
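/* Informal example: for `f(1, k1: 2, k2: 3)` the stack ends in
 * [1, 2, 3] with kw_len == 2; the values 2 and 3 are folded into the
 * hash {k1: 2, k2: 3}, which replaces them in a single slot, argc
 * drops from 3 to 2, and kw_splat is set. */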
2743
2744static inline VALUE
2745vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2746{
2747 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2748 if (keyword_hash != Qnil) {
2749 /* Convert a non-hash keyword splat to a new hash */
2750 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2751 }
2752 }
2753 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2754 /* Convert a hash keyword splat to a new hash unless
2755 * a mutable keyword splat was passed.
2756 * Skip allocating new hash for empty keyword splat, as empty
2757 * keyword splat will be ignored by both callers.
2758 */
2759 keyword_hash = rb_hash_dup(keyword_hash);
2760 }
2761 return keyword_hash;
2762}
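/* Informal example: `f(**h)` with a non-empty Hash dups `h` so the
 * callee cannot mutate the caller's hash; a non-Hash splat is first
 * converted with rb_to_hash_type; an empty hash, nil, or an
 * IS_ARGS_KW_SPLAT_MUT splat passes through unchanged. */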
2763
2764static inline void
2765CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2766 struct rb_calling_info *restrict calling,
2767 const struct rb_callinfo *restrict ci, int max_args)
2768{
2769 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2770 if (IS_ARGS_KW_SPLAT(ci)) {
2771 // f(*a, **kw)
2772 VM_ASSERT(calling->kw_splat == 1);
2773
2774 cfp->sp -= 2;
2775 calling->argc -= 2;
2776 VALUE ary = cfp->sp[0];
2777 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2778
2779 // splat a
2780 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2781
2782 // put kw
2783 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2784 if (UNLIKELY(calling->heap_argv)) {
2785 rb_ary_push(calling->heap_argv, kwh);
2786 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2787 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2788 calling->kw_splat = 0;
2789 }
2790 }
2791 else {
2792 cfp->sp[0] = kwh;
2793 cfp->sp++;
2794 calling->argc++;
2795
2796 VM_ASSERT(calling->kw_splat == 1);
2797 }
2798 }
2799 else {
2800 calling->kw_splat = 0;
2801 }
2802 }
2803 else {
2804 // f(*a)
2805 VM_ASSERT(calling->kw_splat == 0);
2806
2807 cfp->sp -= 1;
2808 calling->argc -= 1;
2809 VALUE ary = cfp->sp[0];
2810
2811 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2812 goto check_keyword;
2813 }
2814
2815 // check the last argument
2816 VALUE last_hash, argv_ary;
2817 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2818 if (!IS_ARGS_KEYWORD(ci) &&
2819 RARRAY_LEN(argv_ary) > 0 &&
2820 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2821 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2822
2823 rb_ary_pop(argv_ary);
2824 if (!RHASH_EMPTY_P(last_hash)) {
2825 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2826 calling->kw_splat = 1;
2827 }
2828 }
2829 }
2830 else {
2831check_keyword:
2832 if (!IS_ARGS_KEYWORD(ci) &&
2833 calling->argc > 0 &&
2834 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2835 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2836
2837 if (RHASH_EMPTY_P(last_hash)) {
2838 calling->argc--;
2839 cfp->sp -= 1;
2840 }
2841 else {
2842 cfp->sp[-1] = rb_hash_dup(last_hash);
2843 calling->kw_splat = 1;
2844 }
2845 }
2846 }
2847 }
2848 }
2849 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2850 // f(**kw)
2851 VM_ASSERT(calling->kw_splat == 1);
2852 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2853
2854 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2855 cfp->sp--;
2856 calling->argc--;
2857 calling->kw_splat = 0;
2858 }
2859 else {
2860 cfp->sp[-1] = kwh;
2861 }
2862 }
2863 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2864 // f(k1:1, k2:2)
2865 VM_ASSERT(calling->kw_splat == 0);
2866
2867 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2868 * by creating a keyword hash.
2869 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2870 */
2871 vm_caller_setup_arg_kw(cfp, calling, ci);
2872 }
2873}
2874
2875#define USE_OPT_HIST 0
2876
2877#if USE_OPT_HIST
2878#define OPT_HIST_MAX 64
2879static int opt_hist[OPT_HIST_MAX+1];
2880
2881__attribute__((destructor))
2882static void
2883opt_hist_show_results_at_exit(void)
2884{
2885 for (int i=0; i<OPT_HIST_MAX; i++) {
2886 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2887 }
2888}
2889#endif
2890
2891static VALUE
2892vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2893 struct rb_calling_info *calling)
2894{
2895 const struct rb_callcache *cc = calling->cc;
2896 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2897 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2898 const int opt = calling->argc - lead_num;
2899 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2900 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2901 const int param = ISEQ_BODY(iseq)->param.size;
2902 const int local = ISEQ_BODY(iseq)->local_table_size;
2903 const int delta = opt_num - opt;
2904
2905 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2906
2907#if USE_OPT_HIST
2908 if (opt_pc < OPT_HIST_MAX) {
2909 opt_hist[opt]++;
2910 }
2911 else {
2912 opt_hist[OPT_HIST_MAX]++;
2913 }
2914#endif
2915
2916 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2917}
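/* Informal example: for `def m(a, b = 1, c = 2)` called with two
 * positional arguments, lead_num == 1, opt_num == 2 and opt == 1, so
 * opt_pc == opt_table[1] resumes at the code filling in `c`'s default,
 * and delta == 1 unfilled slot is trimmed from the param size. */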
2918
2919static VALUE
2920vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2921 struct rb_calling_info *calling)
2922{
2923 const struct rb_callcache *cc = calling->cc;
2924 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2925 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2926 const int opt = calling->argc - lead_num;
2927 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2928
2929 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2930
2931#if USE_OPT_HIST
2932 if (opt_pc < OPT_HIST_MAX) {
2933 opt_hist[opt]++;
2934 }
2935 else {
2936 opt_hist[OPT_HIST_MAX]++;
2937 }
2938#endif
2939
2940 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2941}
2942
2943static void
2944args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2945 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2946 VALUE *const locals);
2947
2948static VALUE
2949vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2950 struct rb_calling_info *calling)
2951{
2952 const struct rb_callcache *cc = calling->cc;
2953 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2954 int param_size = ISEQ_BODY(iseq)->param.size;
2955 int local_size = ISEQ_BODY(iseq)->local_table_size;
2956
2957 // Setting up local size and param size
2958 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2959
2960 local_size = local_size + vm_ci_argc(calling->cd->ci);
2961 param_size = param_size + vm_ci_argc(calling->cd->ci);
2962
2963 cfp->sp[0] = (VALUE)calling->cd->ci;
2964
2965 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2966}
2967
2968static VALUE
2969vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2970 struct rb_calling_info *calling)
2971{
2972 const struct rb_callinfo *ci = calling->cd->ci;
2973 const struct rb_callcache *cc = calling->cc;
2974
2975 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2976 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2977
2978 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2979 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2980 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2981 const int ci_kw_len = kw_arg->keyword_len;
2982 const VALUE * const ci_keywords = kw_arg->keywords;
2983 VALUE *argv = cfp->sp - calling->argc;
2984 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2985 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2986 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2987 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2988 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2989
2990 int param = ISEQ_BODY(iseq)->param.size;
2991 int local = ISEQ_BODY(iseq)->local_table_size;
2992 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2993}
2994
2995static VALUE
2996vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2997 struct rb_calling_info *calling)
2998{
2999 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3000 const struct rb_callcache *cc = calling->cc;
3001
3002 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3003 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3004
3005 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3006 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3007 VALUE * const argv = cfp->sp - calling->argc;
3008 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3009
3010 int i;
3011 for (i=0; i<kw_param->num; i++) {
3012 klocals[i] = kw_param->default_values[i];
3013 }
3014 klocals[i] = INT2FIX(0); // kw specify flag
3015 // NOTE:
3016 // nobody checks this value, but it should be cleared because it can
3017 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3018
3019 int param = ISEQ_BODY(iseq)->param.size;
3020 int local = ISEQ_BODY(iseq)->local_table_size;
3021 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3022}
3023
3024static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3025
3026static VALUE
3027vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3028 struct rb_calling_info *calling)
3029{
3030 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3031 cfp->sp -= (calling->argc + 1);
3032 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3033 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3034}
3035
3036VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3037
3038static void
3039warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3040{
3041 rb_vm_t *vm = GET_VM();
3042 st_table *dup_check_table = vm->unused_block_warning_table;
3043 st_data_t key;
3044 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3045
3046 union {
3047 VALUE v;
3048 unsigned char b[SIZEOF_VALUE];
3049 } k1 = {
3050 .v = (VALUE)pc,
3051 }, k2 = {
3052 .v = (VALUE)cme->def,
3053 };
3054
3055 // relaxed check: deduplicate by method id only
3056 if (!strict_unused_block) {
3057 key = (st_data_t)cme->def->original_id;
3058
3059 if (st_lookup(dup_check_table, key, NULL)) {
3060 return;
3061 }
3062 }
3063
3064 // strict check
3065 // make unique key from pc and me->def pointer
3066 key = 0;
3067 for (int i=0; i<SIZEOF_VALUE; i++) {
3068 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3069 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3070 }
3071
3072 if (0) {
3073 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3074 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3075 fprintf(stderr, "key:%p\n", (void *)key);
3076 }
3077
3078 // duplication check
3079 if (st_insert(dup_check_table, key, 1)) {
3080 // already shown
3081 }
3082 else if (RTEST(ruby_verbose) || strict_unused_block) {
3083 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3084 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3085
3086 if (!NIL_P(m_loc)) {
3087 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3088 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3089 }
3090 else {
3091 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3092 }
3093 }
3094}
3095
3096static inline int
3097vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3098 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3099{
3100 const struct rb_callinfo *ci = calling->cd->ci;
3101 const struct rb_callcache *cc = calling->cc;
3102
3103 VM_ASSERT((vm_ci_argc(ci), 1));
3104 VM_ASSERT(vm_cc_cme(cc) != NULL);
3105
3106 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3107 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3108 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3109 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3110 }
3111
3112 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3113 if (LIKELY(rb_simple_iseq_p(iseq))) {
3114 rb_control_frame_t *cfp = ec->cfp;
3115 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3116 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3117
3118 if (calling->argc != lead_num) {
3119 argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3120 }
3121
3122 //VM_ASSERT(ci == calling->cd->ci);
3123 VM_ASSERT(cc == calling->cc);
3124
3125 if (vm_call_iseq_optimizable_p(ci, cc)) {
3126 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3127 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3128 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3129 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3130 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3131 }
3132 else {
3133 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3134 }
3135 }
3136 return 0;
3137 }
3138 else if (rb_iseq_only_optparam_p(iseq)) {
3139 rb_control_frame_t *cfp = ec->cfp;
3140
3141 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3142 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3143
3144 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3145 const int argc = calling->argc;
3146 const int opt = argc - lead_num;
3147
3148 if (opt < 0 || opt > opt_num) {
3149 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3150 }
3151
3152 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3153 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3154 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3155 vm_call_cacheable(ci, cc));
3156 }
3157 else {
3158 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3159 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3160 vm_call_cacheable(ci, cc));
3161 }
3162
3163 /* initialize opt vars for self-references */
3164 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3165 for (int i=argc; i<lead_num + opt_num; i++) {
3166 argv[i] = Qnil;
3167 }
3168 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3169 }
3170 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3171 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3172 const int argc = calling->argc;
3173 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3174
3175 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3176 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3177
3178 if (argc - kw_arg->keyword_len == lead_num) {
3179 const int ci_kw_len = kw_arg->keyword_len;
3180 const VALUE * const ci_keywords = kw_arg->keywords;
3181 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3182 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3183
3184 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3185 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3186
3187 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3188 vm_call_cacheable(ci, cc));
3189
3190 return 0;
3191 }
3192 }
3193 else if (argc == lead_num) {
3194 /* no kwarg */
3195 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3196 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3197
3198 if (klocals[kw_param->num] == INT2FIX(0)) {
3199 /* copy from default_values */
3200 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3201 vm_call_cacheable(ci, cc));
3202 }
3203
3204 return 0;
3205 }
3206 }
3207 }
3208
3209 // Called iseq is using ... param
3210 // def foo(...) # <- iseq for foo will have "forwardable"
3211 //
3212 // We want to set the `...` local to the caller's CI
3213 // foo(1, 2) # <- the ci for this should end up as `...`
3214 //
3215 // So hopefully the stack looks like:
3216 //
3217 // => 1
3218 // => 2
3219 // => *
3220 // => **
3221 // => &
3222 // => ... # <- points at `foo`s CI
3223 // => cref_or_me
3224 // => specval
3225 // => type
3226 //
3227 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3228 bool can_fastpath = true;
3229
3230 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3231 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3232 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3233 ci = vm_ci_new_runtime(
3234 vm_ci_mid(ci),
3235 vm_ci_flag(ci),
3236 vm_ci_argc(ci),
3237 vm_ci_kwarg(ci));
3238 } else {
3239 ci = forward_cd->caller_ci;
3240 }
3241 can_fastpath = false;
3242 }
3243 // C functions calling iseqs will stack-allocate a CI,
3244 // so we need to convert it to a heap-allocated one
3245 if (!vm_ci_markable(ci)) {
3246 ci = vm_ci_new_runtime(
3247 vm_ci_mid(ci),
3248 vm_ci_flag(ci),
3249 vm_ci_argc(ci),
3250 vm_ci_kwarg(ci));
3251 can_fastpath = false;
3252 }
3253 argv[param_size - 1] = (VALUE)ci;
3254 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3255 return 0;
3256 }
3257
3258 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3259}
3260
3261static void
3262vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3263{
3264 // This case is when the caller is using a ... parameter.
3265 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3266 // In this case the caller's caller's CI will be on the stack.
3267 //
3268 // For example:
3269 //
3270 // def bar(a, b); a + b; end
3271 // def foo(...); bar(...); end
3272 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3273 //
3274 // Stack layout will be:
3275 //
3276 // > 1
3277 // > 2
3278 // > CI for foo(1, 2)
3279 // > cref_or_me
3280 // > specval
3281 // > type
3282 // > receiver
3283 // > CI for foo(1, 2), via `getlocal ...`
3284 // > ( SP points here )
3285 const VALUE * lep = VM_CF_LEP(cfp);
3286
3287 const rb_iseq_t *iseq;
3288
3289 // If we're in an escaped environment (lambda for example), get the iseq
3290 // from the captured env.
3291 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3292 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3293 iseq = env->iseq;
3294 }
3295 else { // Otherwise use the lep to find the caller
3296 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3297 }
3298
3299 // Our local storage is below the args we need to copy
3300 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3301
3302 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3303 VALUE * to = cfp->sp - 1; // clobber the CI
3304
3305 if (RTEST(splat)) {
3306 to -= 1; // clobber the splat array
3307 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3308 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3309 to += RARRAY_LEN(splat);
3310 }
3311
3312 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3313 MEMCPY(to, from, VALUE, argc);
3314 cfp->sp = to + argc;
3315
3316 // Stack layout should now be:
3317 //
3318 // > 1
3319 // > 2
3320 // > CI for foo(1, 2)
3321 // > cref_or_me
3322 // > specval
3323 // > type
3324 // > receiver
3325 // > 1
3326 // > 2
3327 // > ( SP points here )
3328}
3329
3330static VALUE
3331vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3332{
3333 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3334
3335 const struct rb_callcache *cc = calling->cc;
3336 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3337 int param_size = ISEQ_BODY(iseq)->param.size;
3338 int local_size = ISEQ_BODY(iseq)->local_table_size;
3339
3340 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3341
3342 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3343 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3344}
3345
3346static VALUE
3347vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3348{
3349 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3350
3351 const struct rb_callcache *cc = calling->cc;
3352 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3353 int param_size = ISEQ_BODY(iseq)->param.size;
3354 int local_size = ISEQ_BODY(iseq)->local_table_size;
3355
3356 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3357
3358 // Setting up local size and param size
3359 local_size = local_size + vm_ci_argc(calling->cd->ci);
3360 param_size = param_size + vm_ci_argc(calling->cd->ci);
3361
3362 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3363 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3364}
3365
3366static inline VALUE
3367vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3368 int opt_pc, int param_size, int local_size)
3369{
3370 const struct rb_callinfo *ci = calling->cd->ci;
3371 const struct rb_callcache *cc = calling->cc;
3372
3373 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3374 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3375 }
3376 else {
3377 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3378 }
3379}
3380
3381static inline VALUE
3382vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3383 int opt_pc, int param_size, int local_size)
3384{
3385 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3386 VALUE *argv = cfp->sp - calling->argc;
3387 VALUE *sp = argv + param_size;
3388 cfp->sp = argv - 1 /* recv */;
3389
3390 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3391 calling->block_handler, (VALUE)me,
3392 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3393 local_size - param_size,
3394 ISEQ_BODY(iseq)->stack_max);
3395 return Qundef;
3396}
3397
3398static inline VALUE
3399vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3400{
3401 const struct rb_callcache *cc = calling->cc;
3402 unsigned int i;
3403 VALUE *argv = cfp->sp - calling->argc;
3404 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3405 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3406 VALUE *src_argv = argv;
3407 VALUE *sp_orig, *sp;
3408 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3409
3410 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3411 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3412 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3413 dst_captured->code.val = src_captured->code.val;
3414 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3415 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3416 }
3417 else {
3418 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3419 }
3420 }
3421
3422 vm_pop_frame(ec, cfp, cfp->ep);
3423 cfp = ec->cfp;
3424
3425 sp_orig = sp = cfp->sp;
3426
3427 /* push self */
3428 sp[0] = calling->recv;
3429 sp++;
3430
3431 /* copy arguments */
3432 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3433 *sp++ = src_argv[i];
3434 }
3435
3436 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3437 calling->recv, calling->block_handler, (VALUE)me,
3438 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3439 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3440 ISEQ_BODY(iseq)->stack_max);
3441
3442 cfp->sp = sp_orig;
3443
3444 return Qundef;
3445}
3446
3447static void
3448ractor_unsafe_check(void)
3449{
3450 if (!rb_ractor_main_p()) {
3451 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3452 }
3453}
3454
3455static VALUE
3456call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3457{
3458 ractor_unsafe_check();
3459 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3460 return (*f)(recv, rb_ary_new4(argc, argv));
3461}
3462
3463static VALUE
3464call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3465{
3466 ractor_unsafe_check();
3467 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3468 return (*f)(argc, argv, recv);
3469}
3470
3471static VALUE
3472call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3473{
3474 ractor_unsafe_check();
3475 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3476 return (*f)(recv);
3477}
3478
3479static VALUE
3480call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3481{
3482 ractor_unsafe_check();
3483 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3484 return (*f)(recv, argv[0]);
3485}
3486
3487static VALUE
3488call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3489{
3490 ractor_unsafe_check();
3491 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3492 return (*f)(recv, argv[0], argv[1]);
3493}
3494
3495static VALUE
3496call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3497{
3498 ractor_unsafe_check();
3499 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3500 return (*f)(recv, argv[0], argv[1], argv[2]);
3501}
3502
3503static VALUE
3504call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3505{
3506 ractor_unsafe_check();
3507 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3508 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3509}
3510
3511static VALUE
3512call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3513{
3514 ractor_unsafe_check();
3515 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3516 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3517}
3518
3519static VALUE
3520call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3521{
3522 ractor_unsafe_check();
3523 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3524 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3525}
3526
3527static VALUE
3528call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3529{
3530 ractor_unsafe_check();
3531 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3532 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3533}
3534
3535static VALUE
3536call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3537{
3538 ractor_unsafe_check();
3539 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3540 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3541}
3542
3543static VALUE
3544call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3545{
3546 ractor_unsafe_check();
3547 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3548 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3549}
3550
3551static VALUE
3552call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3553{
3554 ractor_unsafe_check();
3555 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3556 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3557}
3558
3559static VALUE
3560call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3561{
3562 ractor_unsafe_check();
3563 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3564 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3565}
3566
3567static VALUE
3568call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3569{
3570 ractor_unsafe_check();
3571 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3572 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3573}
3574
3575static VALUE
3576call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3577{
3578 ractor_unsafe_check();
3579 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3580 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3581}
3582
3583static VALUE
3584call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3585{
3586 ractor_unsafe_check();
3587 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3588 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3589}
3590
3591static VALUE
3592call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3593{
3594 ractor_unsafe_check();
3595 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3596 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3597}
3598
3599static VALUE
3600ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3601{
3602 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3603 return (*f)(recv, rb_ary_new4(argc, argv));
3604}
3605
3606static VALUE
3607ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3608{
3609 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3610 return (*f)(argc, argv, recv);
3611}
3612
3613static VALUE
3614ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3615{
3616 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3617 return (*f)(recv);
3618}
3619
3620static VALUE
3621ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3622{
3623 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3624 return (*f)(recv, argv[0]);
3625}
3626
3627static VALUE
3628ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3629{
3630 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3631 return (*f)(recv, argv[0], argv[1]);
3632}
3633
3634static VALUE
3635ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3636{
3637 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3638 return (*f)(recv, argv[0], argv[1], argv[2]);
3639}
3640
3641static VALUE
3642ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643{
3644 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3645 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3646}
3647
3648static VALUE
3649ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3650{
3651 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3652 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3653}
3654
3655static VALUE
3656ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657{
3658 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3659 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3660}
3661
3662static VALUE
3663ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3664{
3665 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3666 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3667}
3668
3669static VALUE
3670ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3671{
3672 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3673 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3674}
3675
3676static VALUE
3677ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3678{
3679 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3680 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3681}
3682
3683static VALUE
3684ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3685{
3686 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3687 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3688}
3689
3690static VALUE
3691ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3692{
3693 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3694 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3695}
3696
3697static VALUE
3698ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3699{
3700 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3701 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3702}
3703
3704static VALUE
3705ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3706{
3707 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3708 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3709}
3710
3711static VALUE
3712ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3713{
3714 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3715 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3716}
3717
3718static VALUE
3719ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3720{
3721 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3722 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3723}
3724
3725static inline int
3726vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3727{
3728 const int ov_flags = RAISED_STACKOVERFLOW;
3729 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3730 if (rb_ec_raised_p(ec, ov_flags)) {
3731 rb_ec_raised_reset(ec, ov_flags);
3732 return TRUE;
3733 }
3734 return FALSE;
3735}
3736
3737#define CHECK_CFP_CONSISTENCY(func) \
3738 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3739 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3740
3741static inline
3742const rb_method_cfunc_t *
3743vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3744{
3745#if VM_DEBUG_VERIFY_METHOD_CACHE
3746 switch (me->def->type) {
3747 case VM_METHOD_TYPE_CFUNC:
3748 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3749 break;
3750# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3751 METHOD_BUG(ISEQ);
3752 METHOD_BUG(ATTRSET);
3753 METHOD_BUG(IVAR);
3754 METHOD_BUG(BMETHOD);
3755 METHOD_BUG(ZSUPER);
3756 METHOD_BUG(UNDEF);
3757 METHOD_BUG(OPTIMIZED);
3758 METHOD_BUG(MISSING);
3759 METHOD_BUG(REFINED);
3760 METHOD_BUG(ALIAS);
3761# undef METHOD_BUG
3762 default:
3763 rb_bug("wrong method type: %d", me->def->type);
3764 }
3765#endif
3766 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3767}
3768
3769static VALUE
3770vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3771 int argc, VALUE *argv, VALUE *stack_bottom)
3772{
3773 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3774 const struct rb_callinfo *ci = calling->cd->ci;
3775 const struct rb_callcache *cc = calling->cc;
3776 VALUE val;
3777 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3778 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3779
3780 VALUE recv = calling->recv;
3781 VALUE block_handler = calling->block_handler;
3782 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3783
3784 if (UNLIKELY(calling->kw_splat)) {
3785 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3786 }
3787
3788 VM_ASSERT(reg_cfp == ec->cfp);
3789
3790 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3791 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3792
3793 vm_push_frame(ec, NULL, frame_type, recv,
3794 block_handler, (VALUE)me,
3795 0, ec->cfp->sp, 0, 0);
3796
3797 int len = cfunc->argc;
3798 if (len >= 0) rb_check_arity(argc, len, len);
3799
3800 reg_cfp->sp = stack_bottom;
3801 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3802
3803 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3804
3805 rb_vm_pop_frame(ec);
3806
3807 VM_ASSERT(ec->cfp->sp == stack_bottom);
3808
3809 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3810 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3811
3812 return val;
3813}
3814
3815// Push a C method frame for a given cme. This is called when JIT code skipped
3816// pushing a frame but the C method reached a point where a frame is needed.
3817void
3818rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3819{
3820 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3821 rb_execution_context_t *ec = GET_EC();
3822 VALUE *sp = ec->cfp->sp;
3823 VALUE recv = *(sp - recv_idx - 1);
3824 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3825 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3826#if VM_CHECK_MODE > 0
3827 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3828 *(GET_EC()->cfp->sp) = Qfalse;
3829#endif
3830 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3831}
3832
3833// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3834bool
3835rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3836{
3837 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3838}
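
/* Illustrative call shapes (hedged; Ruby-level examples, not VM code):
 *
 *   obj.foo(*args)      # IS_ARGS_SPLAT
 *   obj.foo(k: 1)       # IS_ARGS_KW_OR_KW_SPLAT (VM_CALL_KWARG)
 *   obj.foo(**opts)     # IS_ARGS_KW_OR_KW_SPLAT (VM_CALL_KW_SPLAT)
 *
 * These need CALLER_SETUP_ARG to reconcile arguments, so such call sites
 * cannot cache the plain vm_call_cfunc_with_frame fastpath, while a simple
 * obj.foo(1, 2) can.
 */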
3839
3840static VALUE
3841vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3842{
3843 int argc = calling->argc;
3844 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3845 VALUE *argv = &stack_bottom[1];
3846
3847 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3848}
3849
3850static VALUE
3851vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3852{
3853 const struct rb_callinfo *ci = calling->cd->ci;
3854 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3855
3856 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3857 VALUE argv_ary;
3858 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3859 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3860 int argc = RARRAY_LENINT(argv_ary);
3861 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3862 VALUE *stack_bottom = reg_cfp->sp - 2;
3863
3864 VM_ASSERT(calling->argc == 1);
3865 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3866 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3867
3868 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3869 }
3870 else {
3871 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3872
3873 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3874 }
3875}
3876
3877static inline VALUE
3878vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3879{
3880 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3881 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3882
3883 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3884 return vm_call_cfunc_other(ec, reg_cfp, calling);
3885 }
3886
3887 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3888 calling->kw_splat = 0;
3889 int i;
3890 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3891 VALUE *sp = stack_bottom;
3892 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3893 for (i = 0; i < argc; i++) {
3894 *++sp = argv[i];
3895 }
3896 reg_cfp->sp = sp+1;
3897
3898 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3899}
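
/* Illustrative stack rewrite (hedged sketch; assumes stack_offset == 0 and a
 * hidden argv array with three elements a0, a1, a2):
 *
 *          before                 after
 *        +----------+          +------+
 *        | argv_ary |          |  a2  |  <- new SP points one above a2
 *        +----------+          +------+
 *        |   recv   |          |  a1  |
 *        +----------+          +------+
 *                              |  a0  |
 *                              +------+
 *                              | recv |  <- stack_bottom
 *                              +------+
 *
 * The elements are copied out of the hidden array so the cfunc receives a
 * plain argv on the VM stack; argc > VM_ARGC_STACK_MAX instead falls back to
 * vm_call_cfunc_other, which keeps the arguments on the heap.
 */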
3900
3901static inline VALUE
3902vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3903{
3904 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3905 VALUE argv_ary = reg_cfp->sp[-1];
3906 int argc = RARRAY_LENINT(argv_ary);
3907 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3908 VALUE last_hash;
3909 int argc_offset = 0;
3910
3911 if (UNLIKELY(argc > 0 &&
3912 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3913 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3914 if (!RHASH_EMPTY_P(last_hash)) {
3915 return vm_call_cfunc_other(ec, reg_cfp, calling);
3916 }
3917 argc_offset++;
3918 }
3919 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3920}
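
/* Illustrative Ruby-level example (hedged): the flagged-hash check above
 * implements ruby2_keywords semantics for f(*a):
 *
 *   ruby2_keywords def m(*a) = f(*a)
 *   m(1, k: 2)   # a.last carries RHASH_PASS_AS_KEYWORDS and is non-empty,
 *                # so the call falls back to vm_call_cfunc_other
 *   m(1)         # plain splat: handled on this fast path
 *
 * An empty flagged hash passes no keywords and is simply dropped
 * (argc_offset++).
 */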
3921
3922static inline VALUE
3923vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3924{
3925 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3926 VALUE keyword_hash = reg_cfp->sp[-1];
3927
3928 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3929 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3930 }
3931
3932 return vm_call_cfunc_other(ec, reg_cfp, calling);
3933}
3934
3935static VALUE
3936vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3937{
3938 const struct rb_callinfo *ci = calling->cd->ci;
3939 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3940
3941 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3942 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3943 // f(*a)
3944 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3945 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3946 }
3947 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3948 // f(*a, **kw)
3949 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3950 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3951 }
3952 }
3953
3954 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3955 return vm_call_cfunc_other(ec, reg_cfp, calling);
3956}
3957
3958static VALUE
3959vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3960{
3961 const struct rb_callcache *cc = calling->cc;
3962 RB_DEBUG_COUNTER_INC(ccf_ivar);
3963 cfp->sp -= 1;
3964 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3965 return ivar;
3966}
3967
3968static VALUE
3969vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3970{
3971 RB_DEBUG_COUNTER_INC(ccf_attrset);
3972 VALUE val = *(cfp->sp - 1);
3973 cfp->sp -= 2;
3974 attr_index_t index = vm_cc_attr_index(cc);
3975 shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3976 ID id = vm_cc_cme(cc)->def->body.attr.id;
3977 rb_check_frozen(obj);
3978 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3979 if (UNDEF_P(res)) {
3980 switch (BUILTIN_TYPE(obj)) {
3981 case T_OBJECT:
3982 case T_CLASS:
3983 case T_MODULE:
3984 break;
3985 default:
3986 {
3987 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3988 if (!UNDEF_P(res)) {
3989 return res;
3990 }
3991 }
3992 }
3993 res = vm_setivar_slowpath_attr(obj, id, val, cc);
3994 }
3995 return res;
3996}
3997
3998static VALUE
3999vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4000{
4001 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4002}
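
/* Illustrative Ruby-level example (hedged): methods generated by attr_reader
 * and attr_writer are VM_METHOD_TYPE_IVAR / VM_METHOD_TYPE_ATTRSET, so they
 * dispatch through vm_call_ivar / vm_call_attrset above:
 *
 *   class Point
 *     attr_accessor :x
 *   end
 *   p = Point.new
 *   p.x = 1    # vm_call_attrset: writes @x via the cached shape id/index
 *   p.x        # vm_call_ivar:    reads  @x the same way
 */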
4003
4004static inline VALUE
4005vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4006{
4007 rb_proc_t *proc;
4008 VALUE val;
4009 const struct rb_callcache *cc = calling->cc;
4010 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4011 VALUE procv = cme->def->body.bmethod.proc;
4012
4013 if (!RB_OBJ_SHAREABLE_P(procv) &&
4014 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4015 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4016 }
4017
4018 /* control block frame */
4019 GetProcPtr(procv, proc);
4020 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4021
4022 return val;
4023}
4024
4025static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4026
4027static VALUE
4028vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4029{
4030 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4031
4032 const struct rb_callcache *cc = calling->cc;
4033 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4034 VALUE procv = cme->def->body.bmethod.proc;
4035
4036 if (!RB_OBJ_SHAREABLE_P(procv) &&
4037 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4038 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4039 }
4040
4041 rb_proc_t *proc;
4042 GetProcPtr(procv, proc);
4043 const struct rb_block *block = &proc->block;
4044
4045 while (vm_block_type(block) == block_type_proc) {
4046 block = vm_proc_block(block->as.proc);
4047 }
4048 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4049
4050 const struct rb_captured_block *captured = &block->as.captured;
4051 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4052 VALUE * const argv = cfp->sp - calling->argc;
4053 const int arg_size = ISEQ_BODY(iseq)->param.size;
4054
4055 int opt_pc;
4056 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4057 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4058 }
4059 else {
4060 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4061 }
4062
4063 cfp->sp = argv - 1; // -1 for the receiver
4064
4065 vm_push_frame(ec, iseq,
4066 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4067 calling->recv,
4068 VM_GUARDED_PREV_EP(captured->ep),
4069 (VALUE)cme,
4070 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4071 argv + arg_size,
4072 ISEQ_BODY(iseq)->local_table_size - arg_size,
4073 ISEQ_BODY(iseq)->stack_max);
4074
4075 return Qundef;
4076}
4077
4078static VALUE
4079vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4080{
4081 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4082
4083 VALUE *argv;
4084 int argc;
4085 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4086 if (UNLIKELY(calling->heap_argv)) {
4087 argv = RARRAY_PTR(calling->heap_argv);
4088 cfp->sp -= 2;
4089 }
4090 else {
4091 argc = calling->argc;
4092 argv = ALLOCA_N(VALUE, argc);
4093 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4094 cfp->sp += - argc - 1;
4095 }
4096
4097 return vm_call_bmethod_body(ec, calling, argv);
4098}
4099
4100static VALUE
4101vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4102{
4103 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4104
4105 const struct rb_callcache *cc = calling->cc;
4106 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4107 VALUE procv = cme->def->body.bmethod.proc;
4108 rb_proc_t *proc;
4109 GetProcPtr(procv, proc);
4110 const struct rb_block *block = &proc->block;
4111
4112 while (vm_block_type(block) == block_type_proc) {
4113 block = vm_proc_block(block->as.proc);
4114 }
4115 if (vm_block_type(block) == block_type_iseq) {
4116 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4117 return vm_call_iseq_bmethod(ec, cfp, calling);
4118 }
4119
4120 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4121 return vm_call_noniseq_bmethod(ec, cfp, calling);
4122}
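
/* Illustrative Ruby-level example (hedged): a "bmethod" is a method whose
 * body is a Proc, e.g.
 *
 *   define_method(:inc) { |x| x + 1 }
 *
 * If the underlying block is an iseq block, vm_call_iseq_bmethod pushes a
 * BLOCK|BMETHOD|LAMBDA frame and runs it like a lambda; otherwise the call
 * goes through vm_call_bmethod_body. Both paths refuse to run a bmethod
 * defined with a non-shareable Proc from a different Ractor (see the checks
 * above).
 */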
4123
4124VALUE
4125rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4126{
4127 VALUE klass = current_class;
4128
4129 /* when a module is prepended, the defined class is an origin iclass; start the search from the class that covers it */
4130 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
4131 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4132 klass = RBASIC_CLASS(klass);
4133 }
4134
4135 while (RTEST(klass)) {
4136 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4137 if (owner == target_owner) {
4138 return klass;
4139 }
4140 klass = RCLASS_SUPER(klass);
4141 }
4142
4143 return current_class; /* maybe module function */
4144}
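
/* Rough Ruby-level sketch (hedged): with
 *
 *   module M; def foo; end; end
 *   class C; prepend M; end
 *
 * C's ancestry contains an origin iclass standing in for C's own methods;
 * starting the walk from the covering class lets the loop above find the
 * entry whose owner matches target_owner, including prepended modules.
 */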
4145
4146static const rb_callable_method_entry_t *
4147aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4148{
4149 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4150 const rb_callable_method_entry_t *cme;
4151
4152 if (orig_me->defined_class == 0) {
4153 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4154 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4155 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4156
4157 if (me->def->reference_count == 1) {
4158 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4159 }
4160 else {
4161 rb_method_definition_t *def =
4162 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4163 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4164 }
4165 }
4166 else {
4167 cme = (const rb_callable_method_entry_t *)orig_me;
4168 }
4169
4170 VM_ASSERT(callable_method_entry_p(cme));
4171 return cme;
4172}
4173
4174const rb_callable_method_entry_t *
4175rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4176{
4177 return aliased_callable_method_entry(me);
4178}
4179
4180static VALUE
4181vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4182{
4183 calling->cc = &VM_CC_ON_STACK(Qundef,
4184 vm_call_general,
4185 {{0}},
4186 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4187
4188 return vm_call_method_each_type(ec, cfp, calling);
4189}
4190
4191static enum method_missing_reason
4192ci_missing_reason(const struct rb_callinfo *ci)
4193{
4194 enum method_missing_reason stat = MISSING_NOENTRY;
4195 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4196 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4197 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4198 return stat;
4199}
4200
4201static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4202
4203static VALUE
4204vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4205 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4206{
4207 ASSUME(calling->argc >= 0);
4208
4209 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4210 int argc = calling->argc;
4211 VALUE recv = calling->recv;
4212 VALUE klass = CLASS_OF(recv);
4213 ID mid = rb_check_id(&symbol);
4214 flags |= VM_CALL_OPT_SEND;
4215
4216 if (UNLIKELY(! mid)) {
4217 mid = idMethodMissing;
4218 missing_reason = ci_missing_reason(ci);
4219 ec->method_missing_reason = missing_reason;
4220
4221 VALUE argv_ary;
4222 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4223 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4224 rb_ary_unshift(argv_ary, symbol);
4225
4226 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4227 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4228 VALUE exc = rb_make_no_method_exception(
4229 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4230
4231 rb_exc_raise(exc);
4232 }
4233 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4234 }
4235 else {
4236 /* E.g. when argc == 2
4237 *
4238 * | | | | TOPN
4239 * | | +------+
4240 * | | +---> | arg1 | 0
4241 * +------+ | +------+
4242 * | arg1 | -+ +-> | arg0 | 1
4243 * +------+ | +------+
4244 * | arg0 | ---+ | sym | 2
4245 * +------+ +------+
4246 * | recv | | recv | 3
4247 * --+------+--------+------+------
4248 */
4249 int i = argc;
4250 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4251 INC_SP(1);
4252 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4253 argc = ++calling->argc;
4254
4255 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4256 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4257 TOPN(i) = symbol;
4258 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4259 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4260 VALUE exc = rb_make_no_method_exception(
4261 rb_eNoMethodError, 0, recv, argc, argv, priv);
4262
4263 rb_exc_raise(exc);
4264 }
4265 else {
4266 TOPN(i) = rb_str_intern(symbol);
4267 }
4268 }
4269 }
4270
4271 struct rb_forwarding_call_data new_fcd = {
4272 .cd = {
4273 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4274 .cc = NULL,
4275 },
4276 .caller_ci = NULL,
4277 };
4278
4279 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4280 calling->cd = &new_fcd.cd;
4281 }
4282 else {
4283 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4284 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4285 new_fcd.caller_ci = caller_ci;
4286 calling->cd = (struct rb_call_data *)&new_fcd;
4287 }
4288 calling->cc = &VM_CC_ON_STACK(klass,
4289 vm_call_general,
4290 { .method_missing_reason = missing_reason },
4291 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4292
4293 if (flags & VM_CALL_FCALL) {
4294 return vm_call_method(ec, reg_cfp, calling);
4295 }
4296
4297 const struct rb_callcache *cc = calling->cc;
4298 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4299
4300 if (vm_cc_cme(cc) != NULL) {
4301 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4302 case METHOD_VISI_PUBLIC: /* likely */
4303 return vm_call_method_each_type(ec, reg_cfp, calling);
4304 case METHOD_VISI_PRIVATE:
4305 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4306 break;
4307 case METHOD_VISI_PROTECTED:
4308 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4309 break;
4310 default:
4311 VM_UNREACHABLE(vm_call_method);
4312 }
4313 return vm_call_method_missing(ec, reg_cfp, calling);
4314 }
4315
4316 return vm_call_method_nome(ec, reg_cfp, calling);
4317}
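
/* Illustrative Ruby-level examples (hedged):
 *
 *   obj.send(:foo, 1)    # interned name: falls through to normal dispatch;
 *                        # VM_CALL_FCALL lets send reach private methods
 *   obj.send("nope123")  # no interned id exists: rerouted to method_missing.
 *                        # With the default method_missing, NoMethodError is
 *                        # raised without interning the name, so attacker-
 *                        # controlled strings do not create Symbols
 *                        # ([Feature #5112])
 */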
4318
4319static VALUE
4320vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4321{
4322 const struct rb_callinfo *ci = calling->cd->ci;
4323 int i;
4324 VALUE sym;
4325
4326 i = calling->argc - 1;
4327
4328 if (calling->argc == 0) {
4329 rb_raise(rb_eArgError, "no method name given");
4330 }
4331
4332 sym = TOPN(i);
4333 /* E.g. when i == 2
4334 *
4335 * | | | | TOPN
4336 * +------+ | |
4337 * | arg1 | ---+ | | 0
4338 * +------+ | +------+
4339 * | arg0 | -+ +-> | arg1 | 1
4340 * +------+ | +------+
4341 * | sym | +---> | arg0 | 2
4342 * +------+ +------+
4343 * | recv | | recv | 3
4344 * --+------+--------+------+------
4345 */
4346 /* shift arguments */
4347 if (i > 0) {
4348 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4349 }
4350 calling->argc -= 1;
4351 DEC_SP(1);
4352
4353 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4354}
4355
4356static VALUE
4357vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4358{
4359 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4360 const struct rb_callinfo *ci = calling->cd->ci;
4361 int flags = VM_CALL_FCALL;
4362 VALUE sym;
4363
4364 VALUE argv_ary;
4365 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4366 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4367 sym = rb_ary_shift(argv_ary);
4368 flags |= VM_CALL_ARGS_SPLAT;
4369 if (calling->kw_splat) {
4370 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4371 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4372 calling->kw_splat = 0;
4373 }
4374 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4375 }
4376
4377 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4378 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4379}
4380
4381static VALUE
4382vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4383{
4384 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4385 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4386}
4387
4388static VALUE
4389vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4390{
4391 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4392
4393 const struct rb_callinfo *ci = calling->cd->ci;
4394 int flags = vm_ci_flag(ci);
4395
4396 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4397 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4398 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4399 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4400 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4401 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4402 }
4403
4404 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4405 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4406}
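
/* Illustrative call shapes (hedged): the "complex" path handles cases where
 * the method name may be buried in a splat or keywords need materializing:
 *
 *   obj.send(*args)           # name is args[0]
 *   obj.send(*args, **opts)
 *
 * The "simple" path just shifts the leading symbol off the stack
 * (vm_call_opt_send0) and re-dispatches with VM_CALL_FCALL set.
 */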
4407
4408static VALUE
4409vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4410 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4411{
4412 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4413
4414 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4415 unsigned int argc, flag;
4416
4417 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4418 argc = ++calling->argc;
4419
4420 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4421 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4422 vm_check_canary(ec, reg_cfp->sp);
4423 if (argc > 1) {
4424 MEMMOVE(argv+1, argv, VALUE, argc-1);
4425 }
4426 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4427 INC_SP(1);
4428
4429 ec->method_missing_reason = reason;
4430
4431 struct rb_forwarding_call_data new_fcd = {
4432 .cd = {
4433 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4434 .cc = NULL,
4435 },
4436 .caller_ci = NULL,
4437 };
4438
4439 if (!(flag & VM_CALL_FORWARDING)) {
4440 calling->cd = &new_fcd.cd;
4441 }
4442 else {
4443 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4444 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4445 new_fcd.caller_ci = caller_ci;
4446 calling->cd = (struct rb_call_data *)&new_fcd;
4447 }
4448
4449 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4450 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4451 return vm_call_method(ec, reg_cfp, calling);
4452}
4453
4454static VALUE
4455vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4456{
4457 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4458}
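
/* Illustrative Ruby-level flow (hedged):
 *
 *   class C
 *     def method_missing(name, *args) = [name, args]
 *   end
 *   C.new.m(1, 2)   #=> [:m, [1, 2]]
 *
 * The VM rewrites the failed call into method_missing(:m, 1, 2) by shifting
 * the arguments up one slot and storing the method name symbol at argv[0],
 * exactly as done above.
 */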
4459
4460static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4461static VALUE
4462vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4463{
4464 klass = RCLASS_SUPER(klass);
4465
4466 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4467 if (cme == NULL) {
4468 return vm_call_method_nome(ec, cfp, calling);
4469 }
4470 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4471 cme->def->body.refined.orig_me) {
4472 cme = refined_method_callable_without_refinement(cme);
4473 }
4474
4475 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4476
4477 return vm_call_method_each_type(ec, cfp, calling);
4478}
4479
4480static inline VALUE
4481find_refinement(VALUE refinements, VALUE klass)
4482{
4483 if (NIL_P(refinements)) {
4484 return Qnil;
4485 }
4486 return rb_hash_lookup(refinements, klass);
4487}
4488
4489PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4490static rb_control_frame_t *
4491current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4492{
4493 rb_control_frame_t *top_cfp = cfp;
4494
4495 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4496 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4497
4498 do {
4499 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4500 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4501 /* TODO: orphan block */
4502 return top_cfp;
4503 }
4504 } while (cfp->iseq != local_iseq);
4505 }
4506 return cfp;
4507}
4508
4509static const rb_callable_method_entry_t *
4510refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4511{
4512 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4513 const rb_callable_method_entry_t *cme;
4514
4515 if (orig_me->defined_class == 0) {
4516 cme = NULL;
4517 rb_notimplement();
4518 }
4519 else {
4520 cme = (const rb_callable_method_entry_t *)orig_me;
4521 }
4522
4523 VM_ASSERT(callable_method_entry_p(cme));
4524
4525 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4526 cme = NULL;
4527 }
4528
4529 return cme;
4530}
4531
4532static const rb_callable_method_entry_t *
4533search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4534{
4535 ID mid = vm_ci_mid(calling->cd->ci);
4536 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4537 const struct rb_callcache * const cc = calling->cc;
4538 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4539
4540 for (; cref; cref = CREF_NEXT(cref)) {
4541 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4542 if (NIL_P(refinement)) continue;
4543
4544 const rb_callable_method_entry_t *const ref_me =
4545 rb_callable_method_entry(refinement, mid);
4546
4547 if (ref_me) {
4548 if (vm_cc_call(cc) == vm_call_super_method) {
4549 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4550 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4551 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4552 continue;
4553 }
4554 }
4555
4556 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4557 cme->def != ref_me->def) {
4558 cme = ref_me;
4559 }
4560 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4561 return cme;
4562 }
4563 }
4564 else {
4565 return NULL;
4566 }
4567 }
4568
4569 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4570 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4571 }
4572 else {
4573 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4574 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4575 return cme;
4576 }
4577}
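
/* Illustrative Ruby-level example (hedged):
 *
 *   module Ext
 *     refine String do
 *       def shout = upcase + "!"
 *     end
 *   end
 *   using Ext
 *   "hi".shout   #=> "HI!"
 *
 * The cref chain of the calling frame is scanned above for a refinement of
 * the method's owner; when none applies (or a super call would re-enter the
 * same definition), the search falls back to the refined method's original
 * implementation or the superclass.
 */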
4578
4579static VALUE
4580vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4581{
4582 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4583
4584 if (ref_cme) {
4585 if (calling->cd->cc) {
4586 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4587 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4588 return vm_call_method(ec, cfp, calling);
4589 }
4590 else {
4591 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4592 calling->cc = ref_cc;
4593 return vm_call_method(ec, cfp, calling);
4594 }
4595 }
4596 else {
4597 return vm_call_method_nome(ec, cfp, calling);
4598 }
4599}
4600
4601static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4602
4603NOINLINE(static VALUE
4604 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4605 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4606
4607static VALUE
4608vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4609 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4610{
4611 int argc = calling->argc;
4612
4613 /* remove self */
4614 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4615 DEC_SP(1);
4616
4617 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4618}
4619
4620static VALUE
4621vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4622{
4623 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4624
4625 const struct rb_callinfo *ci = calling->cd->ci;
4626 VALUE procval = calling->recv;
4627 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4628}
4629
4630static VALUE
4631vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4632{
4633 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4634
4635 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4636 const struct rb_callinfo *ci = calling->cd->ci;
4637
4638 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4639 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4640 }
4641 else {
4642 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4643 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4644 return vm_call_general(ec, reg_cfp, calling);
4645 }
4646}
4647
4648static VALUE
4649vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4650{
4651 VALUE recv = calling->recv;
4652
4653 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4654 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4655 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4656
4657 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4658 return internal_RSTRUCT_GET(recv, off);
4659}
4660
4661static VALUE
4662vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4663{
4664 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4665
4666 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4667 reg_cfp->sp -= 1;
4668 return ret;
4669}
4670
4671static VALUE
4672vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4673{
4674 VALUE recv = calling->recv;
4675
4676 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4677 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4678 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4679
4680 rb_check_frozen(recv);
4681
4682 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4683 internal_RSTRUCT_SET(recv, off, val);
4684
4685 return val;
4686}
4687
4688static VALUE
4689vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4690{
4691 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4692
4693 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4694 reg_cfp->sp -= 2;
4695 return ret;
4696}
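
/* Illustrative Ruby-level example (hedged): Struct member accessors are
 * OPTIMIZED method entries whose index selects the member slot:
 *
 *   Point = Struct.new(:x, :y)
 *   p = Point.new(1, 2)
 *   p.x        # vm_call_opt_struct_aref: internal_RSTRUCT_GET(p, 0)
 *   p.y = 3    # vm_call_opt_struct_aset: internal_RSTRUCT_SET(p, 1, 3)
 */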
4697
4698NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4699 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4700
4701#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4702 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4703 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4704 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4705 var = func; \
4706 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4707 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4708 } \
4709 else { \
4710 nohook; \
4711 var = func; \
4712 }
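
/* Descriptive note: VM_CALL_METHOD_ATTR evaluates `func` either bracketed by
 * c-call/c-return event hooks (when e.g. a TracePoint is active) or bare; in
 * the bare case it first runs `nohook`, which typically installs a hook-free
 * fastpath that must not be cached while C-call events are enabled.
 */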
4713
4714static VALUE
4715vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4716 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4717{
4718 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4719 case OPTIMIZED_METHOD_TYPE_SEND:
4720 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4721 return vm_call_opt_send(ec, cfp, calling);
4722 case OPTIMIZED_METHOD_TYPE_CALL:
4723 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4724 return vm_call_opt_call(ec, cfp, calling);
4725 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4726 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4727 return vm_call_opt_block_call(ec, cfp, calling);
4728 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4729 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4730 rb_check_arity(calling->argc, 0, 0);
4731
4732 VALUE v;
4733 VM_CALL_METHOD_ATTR(v,
4734 vm_call_opt_struct_aref(ec, cfp, calling),
4735 set_vm_cc_ivar(cc); \
4736 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4737 return v;
4738 }
4739 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4740 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4741 rb_check_arity(calling->argc, 1, 1);
4742
4743 VALUE v;
4744 VM_CALL_METHOD_ATTR(v,
4745 vm_call_opt_struct_aset(ec, cfp, calling),
4746 set_vm_cc_ivar(cc); \
4747 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4748 return v;
4749 }
4750 default:
4751 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4752 }
4753}
4754
4755static VALUE
4756vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4757{
4758 const struct rb_callinfo *ci = calling->cd->ci;
4759 const struct rb_callcache *cc = calling->cc;
4760 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4761 VALUE v;
4762
4763 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4764
4765 switch (cme->def->type) {
4766 case VM_METHOD_TYPE_ISEQ:
4767 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4768 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4769 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4770 }
4771 else {
4772 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4773 return vm_call_iseq_setup(ec, cfp, calling);
4774 }
4775
4776 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4777 case VM_METHOD_TYPE_CFUNC:
4778 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4779 return vm_call_cfunc(ec, cfp, calling);
4780
4781 case VM_METHOD_TYPE_ATTRSET:
4782 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4783
4784 rb_check_arity(calling->argc, 1, 1);
4785
4786 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4787
4788 if (vm_cc_markable(cc)) {
4789 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4790 VM_CALL_METHOD_ATTR(v,
4791 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4792 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4793 }
4794 else {
4795 cc = &((struct rb_callcache) {
4796 .flags = T_IMEMO |
4797 (imemo_callcache << FL_USHIFT) |
4798 VM_CALLCACHE_UNMARKABLE |
4799 VM_CALLCACHE_ON_STACK,
4800 .klass = cc->klass,
4801 .cme_ = cc->cme_,
4802 .call_ = cc->call_,
4803 .aux_ = {
4804 .attr = {
4805 .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4806 }
4807 },
4808 });
4809
4810 VM_CALL_METHOD_ATTR(v,
4811 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4812 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4813 }
4814 return v;
4815
4816 case VM_METHOD_TYPE_IVAR:
4817 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4818 rb_check_arity(calling->argc, 0, 0);
4819 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4820 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4821 VM_CALL_METHOD_ATTR(v,
4822 vm_call_ivar(ec, cfp, calling),
4823 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4824 return v;
4825
4826 case VM_METHOD_TYPE_MISSING:
4827 vm_cc_method_missing_reason_set(cc, 0);
4828 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4829 return vm_call_method_missing(ec, cfp, calling);
4830
4831 case VM_METHOD_TYPE_BMETHOD:
4832 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4833 return vm_call_bmethod(ec, cfp, calling);
4834
4835 case VM_METHOD_TYPE_ALIAS:
4836 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4837 return vm_call_alias(ec, cfp, calling);
4838
4839 case VM_METHOD_TYPE_OPTIMIZED:
4840 return vm_call_optimized(ec, cfp, calling, ci, cc);
4841
4842 case VM_METHOD_TYPE_UNDEF:
4843 break;
4844
4845 case VM_METHOD_TYPE_ZSUPER:
4846 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4847
4848 case VM_METHOD_TYPE_REFINED:
4849 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4850 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4851 return vm_call_refined(ec, cfp, calling);
4852 }
4853
4854 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4855}
4856
4857NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4858
4859static VALUE
4860vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4861{
4862 /* method missing */
4863 const struct rb_callinfo *ci = calling->cd->ci;
4864 const int stat = ci_missing_reason(ci);
4865
4866 if (vm_ci_mid(ci) == idMethodMissing) {
4867 if (UNLIKELY(calling->heap_argv)) {
4868 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4869 }
4870 else {
4871 rb_control_frame_t *reg_cfp = cfp;
4872 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4873 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4874 }
4875 }
4876 else {
4877 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4878 }
4879}
4880
4881/* Protected method calls and super invocations need to check that the receiver
4882 * (self for super) inherits from the module on which the method is defined.
4883 * In the case of refinements, the check should consider the original class,
4884 * not the refinement.
4885 */
4886static VALUE
4887vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4888{
4889 VALUE defined_class = me->defined_class;
4890 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4891 return NIL_P(refined_class) ? defined_class : refined_class;
4892}
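
/* Illustrative Ruby-level example (hedged):
 *
 *   class A
 *     protected def secret = 42
 *   end
 *   class B < A
 *     def peek(other) = other.secret   # allowed: self.is_a?(A)
 *   end
 *   B.new.peek(A.new)   #=> 42
 *   A.new.secret        # NoMethodError: protected method called
 */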
4893
4894static inline VALUE
4895vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4896{
4897 const struct rb_callinfo *ci = calling->cd->ci;
4898 const struct rb_callcache *cc = calling->cc;
4899
4900 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4901
4902 if (vm_cc_cme(cc) != NULL) {
4903 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4904 case METHOD_VISI_PUBLIC: /* likely */
4905 return vm_call_method_each_type(ec, cfp, calling);
4906
4907 case METHOD_VISI_PRIVATE:
4908 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4909 enum method_missing_reason stat = MISSING_PRIVATE;
4910 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4911
4912 vm_cc_method_missing_reason_set(cc, stat);
4913 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4914 return vm_call_method_missing(ec, cfp, calling);
4915 }
4916 return vm_call_method_each_type(ec, cfp, calling);
4917
4918 case METHOD_VISI_PROTECTED:
4919 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4920 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4921 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4922 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4923 return vm_call_method_missing(ec, cfp, calling);
4924 }
4925 else {
4926 /* caching method info to dummy cc */
4927 VM_ASSERT(vm_cc_cme(cc) != NULL);
4928 struct rb_callcache cc_on_stack = *cc;
4929 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4930 calling->cc = &cc_on_stack;
4931 return vm_call_method_each_type(ec, cfp, calling);
4932 }
4933 }
4934 return vm_call_method_each_type(ec, cfp, calling);
4935
4936 default:
4937 rb_bug("unreachable");
4938 }
4939 }
4940 else {
4941 return vm_call_method_nome(ec, cfp, calling);
4942 }
4943}
4944
4945static VALUE
4946vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4947{
4948 RB_DEBUG_COUNTER_INC(ccf_general);
4949 return vm_call_method(ec, reg_cfp, calling);
4950}
4951
4952void
4953rb_vm_cc_general(const struct rb_callcache *cc)
4954{
4955 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4956 VM_ASSERT(cc != vm_cc_empty());
4957
4958 *(vm_call_handler *)&cc->call_ = vm_call_general;
4959}
4960
4961static VALUE
4962vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4963{
4964 RB_DEBUG_COUNTER_INC(ccf_super_method);
4965
4966 // This check is introduced to keep this function distinct from `vm_call_general`, because some
4967 // compilers (we observed this with VC) can merge identical functions, making their addresses the same.
4968 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
4969 if (ec == NULL) rb_bug("unreachable");
4970
4971 /* this check is required to distinguish this function from other call handlers. */
4972 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4973 return vm_call_method(ec, reg_cfp, calling);
4974}
4975
4976/* super */
4977
4978static inline VALUE
4979vm_search_normal_superclass(VALUE klass)
4980{
4981 if (BUILTIN_TYPE(klass) == T_ICLASS &&
4982 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4983 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4984 klass = RBASIC(klass)->klass;
4985 }
4986 klass = RCLASS_ORIGIN(klass);
4987 return RCLASS_SUPER(klass);
4988}
4989
4990NORETURN(static void vm_super_outside(void));
4991
4992static void
4993vm_super_outside(void)
4994{
4995 rb_raise(rb_eNoMethodError, "super called outside of method");
4996}
4997
4998static const struct rb_callcache *
4999empty_cc_for_super(void)
5000{
5001 return &vm_empty_cc_for_super;
5002}
5003
5004static const struct rb_callcache *
5005vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5006{
5007 VALUE current_defined_class;
5008 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5009
5010 if (!me) {
5011 vm_super_outside();
5012 }
5013
5014 current_defined_class = vm_defined_class_for_protected_call(me);
5015
5016 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5017 reg_cfp->iseq != method_entry_iseqptr(me) &&
5018 !rb_obj_is_kind_of(recv, current_defined_class)) {
5019 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5020 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5021
5022 if (m) { /* not bound UnboundMethod */
5023 rb_raise(rb_eTypeError,
5024 "self has wrong type to call super in this context: "
5025 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5026 rb_obj_class(recv), m);
5027 }
5028 }
5029
5030 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5031 rb_raise(rb_eRuntimeError,
5032 "implicit argument passing of super from method defined"
5033 " by define_method() is not supported."
5034 " Specify all arguments explicitly.");
5035 }
5036
5037 ID mid = me->def->original_id;
5038
5039 if (!vm_ci_markable(cd->ci)) {
5040 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5041 }
5042 else {
5043 // update iseq. really? (TODO)
5044 cd->ci = vm_ci_new_runtime(mid,
5045 vm_ci_flag(cd->ci),
5046 vm_ci_argc(cd->ci),
5047 vm_ci_kwarg(cd->ci));
5048
5049 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5050 }
5051
5052 const struct rb_callcache *cc;
5053
5054 VALUE klass = vm_search_normal_superclass(me->defined_class);
5055
5056 if (!klass) {
5057 /* bound instance method of module */
5058 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5059 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5060 }
5061 else {
5062 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5063 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5064
5065 // define_method can cache a cme under a different method id
5066 if (cached_cme == NULL) {
5067 // empty_cc_for_super is not a markable object
5068 cd->cc = empty_cc_for_super();
5069 }
5070 else if (cached_cme->called_id != mid) {
5071 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5072 if (cme) {
5073 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5074 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5075 }
5076 else {
5077 cd->cc = cc = empty_cc_for_super();
5078 }
5079 }
5080 else {
5081 switch (cached_cme->def->type) {
5082 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5083 case VM_METHOD_TYPE_REFINED:
5084 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5085 case VM_METHOD_TYPE_ATTRSET:
5086 case VM_METHOD_TYPE_IVAR:
5087 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5088 break;
5089 default:
5090 break; // use fastpath
5091 }
5092 }
5093 }
5094
5095 VM_ASSERT((vm_cc_cme(cc), true));
5096
5097 return cc;
5098}
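
/* Illustrative Ruby-level notes (hedged):
 *
 *   def m(a); super; end            # zsuper from a regular method: fine
 *   define_method(:m) { super() }   # ok: arguments are passed explicitly
 *   define_method(:m) { super }     # RuntimeError above: implicit zsuper
 *                                   # is unsupported in a bmethod
 *
 * The !klass branch covers a bound instance method of a module, which has no
 * superclass; such a super call is routed to method_missing.
 */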
5099
5100/* yield */
5101
5102static inline int
5103block_proc_is_lambda(const VALUE procval)
5104{
5105 rb_proc_t *proc;
5106
5107 if (procval) {
5108 GetProcPtr(procval, proc);
5109 return proc->is_lambda;
5110 }
5111 else {
5112 return 0;
5113 }
5114}
5115
5116static VALUE
5117vm_yield_with_cfunc(rb_execution_context_t *ec,
5118 const struct rb_captured_block *captured,
5119 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5120 const rb_callable_method_entry_t *me)
5121{
5122 int is_lambda = FALSE; /* TODO */
5123 VALUE val, arg, blockarg;
5124 int frame_flag;
5125 const struct vm_ifunc *ifunc = captured->code.ifunc;
5126
5127 if (is_lambda) {
5128 arg = rb_ary_new4(argc, argv);
5129 }
5130 else if (argc == 0) {
5131 arg = Qnil;
5132 }
5133 else {
5134 arg = argv[0];
5135 }
5136
5137 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5138
5139 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5140 if (kw_splat) {
5141 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5142 }
5143
5144 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5145 frame_flag,
5146 self,
5147 VM_GUARDED_PREV_EP(captured->ep),
5148 (VALUE)me,
5149 0, ec->cfp->sp, 0, 0);
5150 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5151 rb_vm_pop_frame(ec);
5152
5153 return val;
5154}
5155
5156VALUE
5157rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5158{
5159 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5160}
5161
5162static VALUE
5163vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5164{
5165 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5166}
5167
5168static inline int
5169vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5170{
5171 int i;
5172 long len = RARRAY_LEN(ary);
5173
5174 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5175
5176 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5177 argv[i] = RARRAY_AREF(ary, i);
5178 }
5179
5180 return i;
5181}
5182
5183static inline VALUE
5184vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5185{
5186 VALUE ary, arg0 = argv[0];
5187 ary = rb_check_array_type(arg0);
5188#if 0
5189 argv[0] = arg0;
5190#else
5191 VM_ASSERT(argv[0] == arg0);
5192#endif
5193 return ary;
5194}
5195
5196static int
5197vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5198{
5199 if (rb_simple_iseq_p(iseq)) {
5200 rb_control_frame_t *cfp = ec->cfp;
5201 VALUE arg0;
5202
5203 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5204
5205 if (arg_setup_type == arg_setup_block &&
5206 calling->argc == 1 &&
5207 ISEQ_BODY(iseq)->param.flags.has_lead &&
5208 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5209 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5210 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5211 }
5212
5213 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5214 if (arg_setup_type == arg_setup_block) {
5215 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5216 int i;
5217 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5218 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5219 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5220 }
5221 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5222 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5223 }
5224 }
5225 else {
5226 argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5227 }
5228 }
5229
5230 return 0;
5231 }
5232 else {
5233 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5234 }
5235}
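
/* Illustrative Ruby-level examples (hedged) of the block-specific rules
 * applied above (arg_setup_block only):
 *
 *   [[1, 2]].each { |a, b| }      # single array arg is splatted: a=1, b=2
 *   [1].each      { |a, b| }      # missing params are filled with nil
 *   [[1, 2, 3]].each { |a, b| }   # extra values are truncated
 *
 * Lambdas and methods (arg_setup_method) skip these adjustments and raise
 * ArgumentError on arity mismatch instead.
 */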
5236
5237static int
5238vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5239{
5240 struct rb_calling_info calling_entry, *calling;
5241
5242 calling = &calling_entry;
5243 calling->argc = argc;
5244 calling->block_handler = block_handler;
5245 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5246 calling->recv = Qundef;
5247 calling->heap_argv = 0;
5248 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5249
5250 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5251}
5252
5253/* ruby iseq -> ruby block */
5254
5255static VALUE
5256vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5257 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5258 bool is_lambda, VALUE block_handler)
5259{
5260 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5261 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5262 const int arg_size = ISEQ_BODY(iseq)->param.size;
5263 VALUE * const rsp = GET_SP() - calling->argc;
5264 VALUE * const argv = rsp;
5265 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5266
5267 SET_SP(rsp);
5268
5269 vm_push_frame(ec, iseq,
5270 VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
5271 captured->self,
5272 VM_GUARDED_PREV_EP(captured->ep), 0,
5273 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5274 rsp + arg_size,
5275 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5276
5277 return Qundef;
5278}
5279
5280static VALUE
5281vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5282 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5283 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5284{
5285 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5286 int flags = vm_ci_flag(ci);
5287
5288 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5289 ((calling->argc == 0) ||
5290 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5291 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5292 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5293 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5294 flags = 0;
5295 if (UNLIKELY(calling->heap_argv)) {
5296#if VM_ARGC_STACK_MAX < 0
5297 if (RARRAY_LEN(calling->heap_argv) < 1) {
5298 rb_raise(rb_eArgError, "no receiver given");
5299 }
5300#endif
5301 calling->recv = rb_ary_shift(calling->heap_argv);
5302 // Modify stack to avoid cfp consistency error
5303 reg_cfp->sp++;
5304 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5305 reg_cfp->sp[-2] = calling->recv;
5306 flags |= VM_CALL_ARGS_SPLAT;
5307 }
5308 else {
5309 if (calling->argc < 1) {
5310 rb_raise(rb_eArgError, "no receiver given");
5311 }
5312 calling->recv = TOPN(--calling->argc);
5313 }
5314 if (calling->kw_splat) {
5315 flags |= VM_CALL_KW_SPLAT;
5316 }
5317 }
5318 else {
5319 if (calling->argc < 1) {
5320 rb_raise(rb_eArgError, "no receiver given");
5321 }
5322 calling->recv = TOPN(--calling->argc);
5323 }
5324
5325 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5326}
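
/* Illustrative Ruby-level example (hedged):
 *
 *   %w[a b].map(&:upcase)   # block handler is the Symbol :upcase; the first
 *                           # yielded value becomes the receiver and the rest
 *                           # become arguments, hence the "no receiver given"
 *                           # errors above when nothing is yielded
 */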
5327
5328static VALUE
5329vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5330 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5331 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5332{
5333 VALUE val;
5334 int argc;
5335 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5336 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5337 argc = calling->argc;
5338 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5339 POPN(argc); /* TODO: should put before C/yield? */
5340 return val;
5341}
5342
5343static VALUE
5344vm_proc_to_block_handler(VALUE procval)
5345{
5346 const struct rb_block *block = vm_proc_block(procval);
5347
5348 switch (vm_block_type(block)) {
5349 case block_type_iseq:
5350 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5351 case block_type_ifunc:
5352 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5353 case block_type_symbol:
5354 return VM_BH_FROM_SYMBOL(block->as.symbol);
5355 case block_type_proc:
5356 return VM_BH_FROM_PROC(block->as.proc);
5357 }
5358 VM_UNREACHABLE(vm_yield_with_proc);
5359 return Qundef;
5360}
5361
5362static VALUE
5363vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5364 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5365 bool is_lambda, VALUE block_handler)
5366{
5367 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5368 VALUE proc = VM_BH_TO_PROC(block_handler);
5369 is_lambda = block_proc_is_lambda(proc);
5370 block_handler = vm_proc_to_block_handler(proc);
5371 }
5372
5373 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5374}
5375
5376static inline VALUE
5377vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5378 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5379 bool is_lambda, VALUE block_handler)
5380{
5381 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5382 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5383 bool is_lambda, VALUE block_handler);
5384
5385 switch (vm_block_handler_type(block_handler)) {
5386 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5387 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5388 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5389 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5390 default: rb_bug("vm_invoke_block: unreachable");
5391 }
5392
5393 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5394}
5395
5396static VALUE
5397vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5398{
5399 const rb_execution_context_t *ec = GET_EC();
5400 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5401 struct rb_captured_block *captured;
5402
5403 if (cfp == 0) {
5404 rb_bug("vm_make_proc_with_iseq: unreachable");
5405 }
5406
5407 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5408 captured->code.iseq = blockiseq;
5409
5410 return rb_vm_make_proc(ec, captured, rb_cProc);
5411}
5412
5413static VALUE
5414vm_once_exec(VALUE iseq)
5415{
5416 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5417 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5418}
5419
5420static VALUE
5421vm_once_clear(VALUE data)
5422{
5423 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5424 is->once.running_thread = NULL;
5425 return Qnil;
5426}
5427
5428/* defined insn */
5429
5430static bool
5431check_respond_to_missing(VALUE obj, VALUE v)
5432{
5433 VALUE args[2];
5434 VALUE r;
5435
5436 args[0] = obj; args[1] = Qfalse;
5437 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5438 if (!UNDEF_P(r) && RTEST(r)) {
5439 return true;
5440 }
5441 else {
5442 return false;
5443 }
5444}
5445
5446static bool
5447vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5448{
5449 VALUE klass;
5450 enum defined_type type = (enum defined_type)op_type;
5451
5452 switch (type) {
5453 case DEFINED_IVAR:
5454 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5455 break;
5456 case DEFINED_GVAR:
5457 return rb_gvar_defined(SYM2ID(obj));
5458 break;
5459 case DEFINED_CVAR: {
5460 const rb_cref_t *cref = vm_get_cref(GET_EP());
5461 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5462 return rb_cvar_defined(klass, SYM2ID(obj));
5463 break;
5464 }
5465 case DEFINED_CONST:
5466 case DEFINED_CONST_FROM: {
5467 bool allow_nil = type == DEFINED_CONST;
5468 klass = v;
5469 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5470 break;
5471 }
5472 case DEFINED_FUNC:
5473 klass = CLASS_OF(v);
5474 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5475 break;
5476 case DEFINED_METHOD:{
5477 VALUE klass = CLASS_OF(v);
5478 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5479
5480 if (me) {
5481 switch (METHOD_ENTRY_VISI(me)) {
5482 case METHOD_VISI_PRIVATE:
5483 break;
5484 case METHOD_VISI_PROTECTED:
5485 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5486 break;
5487 }
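 /* fall through: a protected method that passes the kind_of? check behaves like a public one */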
5488 case METHOD_VISI_PUBLIC:
5489 return true;
5490 break;
5491 default:
5492 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5493 }
5494 }
5495 else {
5496 return check_respond_to_missing(obj, v);
5497 }
5498 break;
5499 }
5500 case DEFINED_YIELD:
5501 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5502 return true;
5503 }
5504 break;
5505 case DEFINED_ZSUPER:
5506 {
5507 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5508
5509 if (me) {
5510 VALUE klass = vm_search_normal_superclass(me->defined_class);
5511 if (!klass) return false;
5512
5513 ID id = me->def->original_id;
5514
5515 return rb_method_boundp(klass, id, 0);
5516 }
5517 }
5518 break;
5519 case DEFINED_REF:
5520 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5521 default:
5522 rb_bug("unimplemented defined? type (VM)");
5523 break;
5524 }
5525
5526 return false;
5527}
5528
5529bool
5530rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5531{
5532 return vm_defined(ec, reg_cfp, op_type, obj, v);
5533}
5534
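/* Editor's note: walks `lv` links up the environment-pointer chain, one link
 * per enclosing scope; e.g. a block reading a local defined two scopes out is
 * compiled with level 2. */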
5535static const VALUE *
5536vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5537{
5538 rb_num_t i;
5539 const VALUE *ep = reg_ep;
5540 for (i = 0; i < lv; i++) {
5541 ep = GET_PREV_EP(ep);
5542 }
5543 return ep;
5544}
5545
5546static VALUE
5547vm_get_special_object(const VALUE *const reg_ep,
5548 enum vm_special_object_type type)
5549{
5550 switch (type) {
5551 case VM_SPECIAL_OBJECT_VMCORE:
5552 return rb_mRubyVMFrozenCore;
5553 case VM_SPECIAL_OBJECT_CBASE:
5554 return vm_get_cbase(reg_ep);
5555 case VM_SPECIAL_OBJECT_CONST_BASE:
5556 return vm_get_const_base(reg_ep);
5557 default:
5558 rb_bug("putspecialobject insn: unknown value_type %d", type);
5559 }
5560}
5561
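/* Editor's note: backs the concatarray/concattoarray instructions used when
 * building arrays with splats, e.g. `[*a, *b]`. An operand that does not
 * convert via #to_a (rb_check_to_array returns nil) is appended as a single
 * element, and `ary1` is duplicated when #to_a returned the receiver itself,
 * so the source array is never mutated. */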
5562static VALUE
5563vm_concat_array(VALUE ary1, VALUE ary2st)
5564{
5565 const VALUE ary2 = ary2st;
5566 VALUE tmp1 = rb_check_to_array(ary1);
5567 VALUE tmp2 = rb_check_to_array(ary2);
5568
5569 if (NIL_P(tmp1)) {
5570 tmp1 = rb_ary_new3(1, ary1);
5571 }
5572 if (tmp1 == ary1) {
5573 tmp1 = rb_ary_dup(ary1);
5574 }
5575
5576 if (NIL_P(tmp2)) {
5577 return rb_ary_push(tmp1, ary2);
5578 } else {
5579 return rb_ary_concat(tmp1, tmp2);
5580 }
5581}
5582
5583static VALUE
5584vm_concat_to_array(VALUE ary1, VALUE ary2st)
5585{
5586 /* ary1 must be a newly created array */
5587 const VALUE ary2 = ary2st;
5588 VALUE tmp2 = rb_check_to_array(ary2);
5589
5590 if (NIL_P(tmp2)) {
5591 return rb_ary_push(ary1, ary2);
5592 } else {
5593 return rb_ary_concat(ary1, tmp2);
5594 }
5595}
5596
5597// The YJIT implementation uses this C function
5598// and needs a non-static function to call
5599VALUE
5600rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5601{
5602 return vm_concat_array(ary1, ary2st);
5603}
5604
5605VALUE
5606rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5607{
5608 return vm_concat_to_array(ary1, ary2st);
5609}
5610
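/* Editor's note: backs the splatarray instruction, e.g. `f(*x)` or `a = *x`.
 * A truthy `flag` (splatarray true) requests a fresh copy for contexts that
 * must not alias the source array; argument splats can usually skip the dup. */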
5611static VALUE
5612vm_splat_array(VALUE flag, VALUE ary)
5613{
5614 VALUE tmp = rb_check_to_array(ary);
5615 if (NIL_P(tmp)) {
5616 return rb_ary_new3(1, ary);
5617 }
5618 else if (RTEST(flag)) {
5619 return rb_ary_dup(tmp);
5620 }
5621 else {
5622 return tmp;
5623 }
5624}
5625
5626// The YJIT implementation uses this C function
5627// and needs a non-static function to call
5628VALUE
5629rb_vm_splat_array(VALUE flag, VALUE ary)
5630{
5631 return vm_splat_array(flag, ary);
5632}
5633
5634static VALUE
5635vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5636{
5637 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5638
5639 if (flag & VM_CHECKMATCH_ARRAY) {
5640 long i;
5641 const long n = RARRAY_LEN(pattern);
5642
5643 for (i = 0; i < n; i++) {
5644 VALUE v = RARRAY_AREF(pattern, i);
5645 VALUE c = check_match(ec, v, target, type);
5646
5647 if (RTEST(c)) {
5648 return c;
5649 }
5650 }
5651 return Qfalse;
5652 }
5653 else {
5654 return check_match(ec, pattern, target, type);
5655 }
5656}
5657
5658VALUE
5659rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5660{
5661 return vm_check_match(ec, target, pattern, flag);
5662}
5663
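/* Editor's note: backs the checkkeyword instruction. `kw_bits` is a hidden
 * local recording which optional keywords were left unspecified: a Fixnum
 * bitmap for the first KW_SPECIFIED_BITS_MAX keywords, a Hash beyond that.
 * Qtrue means the keyword was supplied, letting compiled code branch over
 * the default-value expression of e.g. `def m(k: expr)`. */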
5664static VALUE
5665vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5666{
5667 const VALUE kw_bits = *(ep - bits);
5668
5669 if (FIXNUM_P(kw_bits)) {
5670 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5671 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5672 return Qfalse;
5673 }
5674 else {
5675 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5676 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5677 }
5678 return Qtrue;
5679}
5680
5681static void
5682vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5683{
5684 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5685 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5686 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5687 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5688
5689 switch (flag) {
5690 case RUBY_EVENT_CALL:
5691 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5692 return;
5693 case RUBY_EVENT_C_CALL:
5694 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5695 return;
5696 case RUBY_EVENT_RETURN:
5697 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5698 return;
5699 case RUBY_EVENT_C_RETURN:
5700 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5701 return;
5702 }
5703 }
5704}
5705
5706static VALUE
5707vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5708{
5709 if (!rb_const_defined_at(cbase, id)) {
5710 return 0;
5711 }
5712 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5713 return rb_public_const_get_at(cbase, id);
5714 }
5715 else {
5716 return rb_const_get_at(cbase, id);
5717 }
5718}
5719
5720static VALUE
5721vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5722{
5723 if (!RB_TYPE_P(klass, T_CLASS)) {
5724 return 0;
5725 }
5726 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5727 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5728
5729 if (tmp != super) {
5730 rb_raise(rb_eTypeError,
5731 "superclass mismatch for class %"PRIsVALUE"",
5732 rb_id2str(id));
5733 }
5734 else {
5735 return klass;
5736 }
5737 }
5738 else {
5739 return klass;
5740 }
5741}
5742
5743static VALUE
5744vm_check_if_module(ID id, VALUE mod)
5745{
5746 if (!RB_TYPE_P(mod, T_MODULE)) {
5747 return 0;
5748 }
5749 else {
5750 return mod;
5751 }
5752}
5753
5754static VALUE
5755declare_under(ID id, VALUE cbase, VALUE c)
5756{
5757 rb_set_class_path_string(c, cbase, rb_id2str(id));
5758 rb_const_set(cbase, id, c);
5759 return c;
5760}
5761
5762static VALUE
5763vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5764{
5765 /* new class declaration */
5766 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5767 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5768 rb_define_alloc_func(c, rb_get_alloc_func(c));
5769 rb_class_inherited(s, c);
5770 return c;
5771}
5772
5773static VALUE
5774vm_declare_module(ID id, VALUE cbase)
5775{
5776 /* new module declaration */
5777 return declare_under(id, cbase, rb_module_new());
5778}
5779
5780NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5781static void
5782unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5783{
5784 VALUE name = rb_id2str(id);
5785 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5786 name, type);
5787 VALUE location = rb_const_source_location_at(cbase, id);
5788 if (!NIL_P(location)) {
5789 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5790 " previous definition of %"PRIsVALUE" was here",
5791 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5792 }
5793 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5794}
5795
5796static VALUE
5797vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5798{
5799 VALUE klass;
5800
5801 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5802 rb_raise(rb_eTypeError,
5803 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5804 rb_obj_class(super));
5805 }
5806
5807 vm_check_if_namespace(cbase);
5808
5809 /* find klass */
5810 rb_autoload_load(cbase, id);
5811 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5812 if (!vm_check_if_class(id, flags, super, klass))
5813 unmatched_redefinition("class", cbase, id, klass);
5814 return klass;
5815 }
5816 else {
5817 return vm_declare_class(id, flags, cbase, super);
5818 }
5819}
5820
5821static VALUE
5822vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5823{
5824 VALUE mod;
5825
5826 vm_check_if_namespace(cbase);
5827 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5828 if (!vm_check_if_module(id, mod))
5829 unmatched_redefinition("module", cbase, id, mod);
5830 return mod;
5831 }
5832 else {
5833 return vm_declare_module(id, cbase);
5834 }
5835}
5836
5837static VALUE
5838vm_find_or_create_class_by_id(ID id,
5839 rb_num_t flags,
5840 VALUE cbase,
5841 VALUE super)
5842{
5843 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5844
5845 switch (type) {
5846 case VM_DEFINECLASS_TYPE_CLASS:
5847 /* classdef returns class scope value */
5848 return vm_define_class(id, flags, cbase, super);
5849
5850 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5851 /* classdef returns class scope value */
5852 return rb_singleton_class(cbase);
5853
5854 case VM_DEFINECLASS_TYPE_MODULE:
5855 /* classdef returns class scope value */
5856 return vm_define_module(id, flags, cbase);
5857
5858 default:
5859 rb_bug("unknown defineclass type: %d", (int)type);
5860 }
5861}
5862
5863static rb_method_visibility_t
5864vm_scope_visibility_get(const rb_execution_context_t *ec)
5865{
5866 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5867
5868 if (!vm_env_cref_by_cref(cfp->ep)) {
5869 return METHOD_VISI_PUBLIC;
5870 }
5871 else {
5872 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5873 }
5874}
5875
5876static int
5877vm_scope_module_func_check(const rb_execution_context_t *ec)
5878{
5879 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5880
5881 if (!vm_env_cref_by_cref(cfp->ep)) {
5882 return FALSE;
5883 }
5884 else {
5885 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5886 }
5887}
5888
5889static void
5890vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5891{
5892 VALUE klass;
5893 rb_method_visibility_t visi;
5894 rb_cref_t *cref = vm_ec_cref(ec);
5895
5896 if (is_singleton) {
5897 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5898 visi = METHOD_VISI_PUBLIC;
5899 }
5900 else {
5901 klass = CREF_CLASS_FOR_DEFINITION(cref);
5902 visi = vm_scope_visibility_get(ec);
5903 }
5904
5905 if (NIL_P(klass)) {
5906 rb_raise(rb_eTypeError, "no class/module to add method");
5907 }
5908
5909 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5910 // Set max_iv_count on klasses based on the number of ivar sets in the initialize method
5911 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5912
5913 RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
5914 }
5915
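 /* `module_function` scope: the (private) instance method defined above also gets a public copy on the singleton class. */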
5916 if (!is_singleton && vm_scope_module_func_check(ec)) {
5917 klass = rb_singleton_class(klass);
5918 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5919 }
5920}
5921
5922static VALUE
5923vm_invokeblock_i(struct rb_execution_context_struct *ec,
5924 struct rb_control_frame_struct *reg_cfp,
5925 struct rb_calling_info *calling)
5926{
5927 const struct rb_callinfo *ci = calling->cd->ci;
5928 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5929
5930 if (block_handler == VM_BLOCK_HANDLER_NONE) {
5931 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5932 }
5933 else {
5934 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5935 }
5936}
5937
5938enum method_explorer_type {
5939 mexp_search_method,
5940 mexp_search_invokeblock,
5941 mexp_search_super,
5942};
5943
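/* Editor's note: common tail of the send-ish instructions. Assumed mapping:
 * `recv.m(args)` dispatches via mexp_search_method (inline-cache fast path),
 * `super` via mexp_search_super, and `yield` via mexp_search_invokeblock,
 * which needs no method lookup at all. */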
5944static inline VALUE
5945vm_sendish(
5946 struct rb_execution_context_struct *ec,
5947 struct rb_control_frame_struct *reg_cfp,
5948 struct rb_call_data *cd,
5949 VALUE block_handler,
5950 enum method_explorer_type method_explorer
5951) {
5952 VALUE val = Qundef;
5953 const struct rb_callinfo *ci = cd->ci;
5954 const struct rb_callcache *cc;
5955 int argc = vm_ci_argc(ci);
5956 VALUE recv = TOPN(argc);
5957 struct rb_calling_info calling = {
5958 .block_handler = block_handler,
5959 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5960 .recv = recv,
5961 .argc = argc,
5962 .cd = cd,
5963 };
5964
5965 switch (method_explorer) {
5966 case mexp_search_method:
5967 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5968 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5969 break;
5970 case mexp_search_super:
5971 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5972 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5973 break;
5974 case mexp_search_invokeblock:
5975 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5976 break;
5977 }
5978 return val;
5979}
5980
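/* Editor's note: the VM_CALL_FORWARDING branch below handles `...` argument
 * forwarding (e.g. `def m(...) = other(...)`); vm_caller_setup_fwd_args()
 * builds an adjusted callinfo/calldata pair on the caller's stack before the
 * normal vm_sendish() dispatch. */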
5981VALUE
5982rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5983{
5984 stack_check(ec);
5985
5986 struct rb_forwarding_call_data adjusted_cd;
5987 struct rb_callinfo adjusted_ci;
5988
5989 VALUE bh;
5990 VALUE val;
5991
5992 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5993 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5994
5995 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5996
5997 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
5998 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
5999 }
6000 }
6001 else {
6002 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6003 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6004 }
6005
6006 VM_EXEC(ec, val);
6007 return val;
6008}
6009
6010VALUE
6011rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6012{
6013 stack_check(ec);
6014 VALUE bh = VM_BLOCK_HANDLER_NONE;
6015 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6016 VM_EXEC(ec, val);
6017 return val;
6018}
6019
6020VALUE
6021rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6022{
6023 stack_check(ec);
6024 struct rb_forwarding_call_data adjusted_cd;
6025 struct rb_callinfo adjusted_ci;
6026
6027 VALUE bh;
6028 VALUE val;
6029
6030 if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6031 bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6032
6033 val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6034
6035 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6036 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6037 }
6038 }
6039 else {
6040 bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6041 val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6042 }
6043
6044 VM_EXEC(ec, val);
6045 return val;
6046}
6047
6048VALUE
6049rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6050{
6051 stack_check(ec);
6052 VALUE bh = VM_BLOCK_HANDLER_NONE;
6053 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6054 VM_EXEC(ec, val);
6055 return val;
6056}
6057
6058/* object.c */
6059VALUE rb_nil_to_s(VALUE);
6060VALUE rb_true_to_s(VALUE);
6061VALUE rb_false_to_s(VALUE);
6062/* numeric.c */
6063VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6064VALUE rb_fix_to_s(VALUE);
6065/* variable.c */
6066VALUE rb_mod_to_s(VALUE);
6067VALUE rb_mod_name(VALUE);
6068
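/* Editor's note: backs the objtostring instruction used by interpolation,
 * e.g. `"#{x}"`. When the receiver's #to_s is known to be the default
 * definition the conversion happens here without a method call; Qundef
 * makes the instruction fall back to a real #to_s dispatch. */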
6069static VALUE
6070vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6071{
6072 int type = TYPE(recv);
6073 if (type == T_STRING) {
6074 return recv;
6075 }
6076
6077 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6078
6079 switch (type) {
6080 case T_SYMBOL:
6081 if (check_method_basic_definition(vm_cc_cme(cc))) {
6082 // rb_sym_to_s() allocates a mutable string, but since we are only
6083 // going to use this string for interpolation, it's fine to use the
6084 // frozen string.
6085 return rb_sym2str(recv);
6086 }
6087 break;
6088 case T_MODULE:
6089 case T_CLASS:
6090 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6091 // rb_mod_to_s() allocates a mutable string, but since we are only
6092 // going to use this string for interpolation, it's fine to use the
6093 // frozen string.
6094 VALUE val = rb_mod_name(recv);
6095 if (NIL_P(val)) {
6096 val = rb_mod_to_s(recv);
6097 }
6098 return val;
6099 }
6100 break;
6101 case T_NIL:
6102 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6103 return rb_nil_to_s(recv);
6104 }
6105 break;
6106 case T_TRUE:
6107 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6108 return rb_true_to_s(recv);
6109 }
6110 break;
6111 case T_FALSE:
6112 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6113 return rb_false_to_s(recv);
6114 }
6115 break;
6116 case T_FIXNUM:
6117 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6118 return rb_fix_to_s(recv);
6119 }
6120 break;
6121 }
6122 return Qundef;
6123}
6124
6125static VALUE
6126vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6127{
6128 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6129 return ary;
6130 }
6131 else {
6132 return Qundef;
6133 }
6134}
6135
6136static VALUE
6137vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6138{
6139 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6140 return hash;
6141 }
6142 else {
6143 return Qundef;
6144 }
6145}
6146
6147static VALUE
6148vm_opt_str_freeze(VALUE str, int bop, ID id)
6149{
6150 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6151 return str;
6152 }
6153 else {
6154 return Qundef;
6155 }
6156}
6157
6158/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6159#define id_cmp idCmp
6160
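/* Editor's note: fast path for `include?` on an array literal, e.g.
 * `[1, 2, 3].include?(x)`. With the default Array#include? the frozen
 * literal is used as-is; otherwise duparray semantics are restored by
 * resurrecting (duplicating) the literal before the generic call. */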
6161static VALUE
6162vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6163{
6164 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6165 return rb_ary_includes(ary, target);
6166 }
6167 else {
6168 VALUE args[1] = {target};
6169
6170 // duparray
6171 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6172 VALUE dupary = rb_ary_resurrect(ary);
6173
6174 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6175 }
6176}
6177
6178VALUE
6179rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6180{
6181 return vm_opt_duparray_include_p(ec, ary, target);
6182}
6183
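/* Editor's note: vm_opt_newarray_max/min below implement `[a, b, c].max`
 * and `.min` over the operands still sitting on the VM stack, so no Array
 * is allocated when the default methods are in effect; OPTIMIZED_CMP falls
 * back to a #<=> dispatch for operand types without a cheap comparison. */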
6184static VALUE
6185vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6186{
6187 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6188 if (num == 0) {
6189 return Qnil;
6190 }
6191 else {
6192 VALUE result = *ptr;
6193 rb_snum_t i = num - 1;
6194 while (i-- > 0) {
6195 const VALUE v = *++ptr;
6196 if (OPTIMIZED_CMP(v, result) > 0) {
6197 result = v;
6198 }
6199 }
6200 return result;
6201 }
6202 }
6203 else {
6204 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6205 }
6206}
6207
6208VALUE
6209rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6210{
6211 return vm_opt_newarray_max(ec, num, ptr);
6212}
6213
6214static VALUE
6215vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6216{
6217 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6218 if (num == 0) {
6219 return Qnil;
6220 }
6221 else {
6222 VALUE result = *ptr;
6223 rb_snum_t i = num - 1;
6224 while (i-- > 0) {
6225 const VALUE v = *++ptr;
6226 if (OPTIMIZED_CMP(v, result) < 0) {
6227 result = v;
6228 }
6229 }
6230 return result;
6231 }
6232 }
6233 else {
6234 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6235 }
6236}
6237
6238VALUE
6239rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6240{
6241 return vm_opt_newarray_min(ec, num, ptr);
6242}
6243
6244static VALUE
6245vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6246{
6247 // If Array#hash is _not_ monkeypatched, use the optimized call
6248 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6249 return rb_ary_hash_values(num, ptr);
6250 }
6251 else {
6252 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6253 }
6254}
6255
6256VALUE
6257rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6258{
6259 return vm_opt_newarray_hash(ec, num, ptr);
6260}
6261
6262VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6263VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6264
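/* Editor's note: judging by its signature, rb_setup_fake_ary() (declared
 * above) wraps a VM-stack operand slice in a caller-provided RArray so the
 * Array primitives below can run without a heap allocation; the fake array
 * must not escape the call. */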
6265static VALUE
6266vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6267{
6268 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6269 struct RArray fake_ary;
6270 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6271 return rb_ary_includes(ary, target);
6272 }
6273 else {
6274 VALUE args[1] = {target};
6275 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6276 }
6277}
6278
6279VALUE
6280rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6281{
6282 return vm_opt_newarray_include_p(ec, num, ptr, target);
6283}
6284
6285static VALUE
6286vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6287{
6288 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6289 struct RArray fake_ary;
6290 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6291 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6292 }
6293 else {
6294 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6295 // Set up an array with room for the keyword hash.
6296 VALUE args[2];
6297 args[0] = fmt;
6298 int kw_splat = RB_NO_KEYWORDS;
6299 int argc = 1;
6300
6301 if (!UNDEF_P(buffer)) {
6302 args[1] = rb_hash_new_with_size(1);
6303 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6304 kw_splat = RB_PASS_KEYWORDS;
6305 argc++;
6306 }
6307
6308 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6309 }
6310}
6311
6312VALUE
6313rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6314{
6315 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6316}
6317
6318VALUE
6319rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6320{
6321 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6322}
6323
6324#undef id_cmp
6325
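/* Editor's note: vm->constant_cache maps each constant name (ID) to the set
 * of inline caches depending on it, so that updating a constant with that
 * name can invalidate exactly those caches. Updates happen under the VM
 * lock (see vm_ic_track_const_chain below). */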
6326static void
6327vm_track_constant_cache(ID id, void *ic)
6328{
6329 rb_vm_t *vm = GET_VM();
6330 struct rb_id_table *const_cache = vm->constant_cache;
6331 VALUE lookup_result;
6332 st_table *ics;
6333
6334 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6335 ics = (st_table *)lookup_result;
6336 }
6337 else {
6338 ics = st_init_numtable();
6339 rb_id_table_insert(const_cache, id, (VALUE)ics);
6340 }
6341
6342 /* The call below to st_insert could allocate which could trigger a GC.
6343 * If it triggers a GC, it may free an iseq that also holds a cache to this
6344 * constant. If that iseq is the last iseq with a cache to this constant, then
6345 * it will free this ST table, which would cause a use-after-free during this
6346 * st_insert.
6347 *
6348 * So to fix this issue, we store the ID that is currently being inserted
6349 * and, in remove_from_constant_cache, we don't free the ST table for ID
6350 * equal to this one.
6351 *
6352 * See [Bug #20921].
6353 */
6354 vm->inserting_constant_cache_id = id;
6355
6356 st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
6357
6358 vm->inserting_constant_cache_id = (ID)0;
6359}
6360
6361static void
6362vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6363{
6364 RB_VM_LOCK_ENTER();
6365
6366 for (int i = 0; segments[i]; i++) {
6367 ID id = segments[i];
6368 if (id == idNULL) continue;
6369 vm_track_constant_cache(id, ic);
6370 }
6371
6372 RB_VM_LOCK_LEAVE();
6373}
6374
6375// For JIT inlining
6376static inline bool
6377vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6378{
6379 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6380 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6381
6382 return (ic_cref == NULL || // no need to check CREF
6383 ic_cref == vm_get_cref(reg_ep));
6384 }
6385 return false;
6386}
6387
6388static bool
6389vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6390{
6391 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6392 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6393}
6394
6395// YJIT needs this function to never allocate and never raise
6396bool
6397rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6398{
6399 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6400}
6401
6402static void
6403vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6404{
6405 if (ruby_vm_const_missing_count > 0) {
6406 ruby_vm_const_missing_count = 0;
6407 ic->entry = NULL;
6408 return;
6409 }
6410
6411 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6412 RB_OBJ_WRITE(ice, &ice->value, val);
6413 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6414 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6415 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6416
6417 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6418 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6419 rb_yjit_constant_ic_update(iseq, ic, pos);
6420}
6421
6422VALUE
6423rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6424{
6425 VALUE val;
6426 const ID *segments = ic->segments;
6427 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6428 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6429 val = ice->value;
6430
6431 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6432 }
6433 else {
6434 ruby_vm_constant_cache_misses++;
6435 val = vm_get_ev_const_chain(ec, segments);
6436 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6437 // Undo the PC increment to get the address to this instruction
6438 // INSN_ATTR(width) == 2
6439 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6440 }
6441 return val;
6442}
6443
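/* Editor's note: backs the `once` instruction, which evaluates its body at
 * most one time per call site, e.g. the /o regexp flag:
 *
 *   def m(x) = /#{x}/o   # interpolation runs only on the first call
 *
 * `running_thread` doubles as the state word: NULL (never run), a thread
 * (in progress), or RUNNING_THREAD_ONCE_DONE. */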
6444static VALUE
6445vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6446{
6447 rb_thread_t *th = rb_ec_thread_ptr(ec);
6448 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6449
6450 again:
6451 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6452 return is->once.value;
6453 }
6454 else if (is->once.running_thread == NULL) {
6455 VALUE val;
6456 is->once.running_thread = th;
6457 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6458 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6459 /* is->once.running_thread is cleared by vm_once_clear() */
6460 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6461 return val;
6462 }
6463 else if (is->once.running_thread == th) {
6464 /* recursive once */
6465 return vm_once_exec((VALUE)iseq);
6466 }
6467 else {
6468 /* waiting for finish */
6469 RUBY_VM_CHECK_INTS(ec);
6470 rb_thread_schedule();
6471 goto again;
6472 }
6473}
6474
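/* Editor's note: backs opt_case_dispatch for `case` expressions whose `when`
 * clauses are all literals. CDHASH maps each literal to a branch offset; a
 * miss yields else_offset, while 0 sends the interpreter down the slow
 * sequential #=== path (taken when a relevant #=== was redefined or the key
 * type is not optimizable). Integral Floats are normalized to Integer keys
 * so `case 1.0` still hits a `when 1` entry. */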
6475static OFFSET
6476vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6477{
6478 switch (OBJ_BUILTIN_TYPE(key)) {
6479 case -1:
6480 case T_FLOAT:
6481 case T_SYMBOL:
6482 case T_BIGNUM:
6483 case T_STRING:
6484 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6485 SYMBOL_REDEFINED_OP_FLAG |
6486 INTEGER_REDEFINED_OP_FLAG |
6487 FLOAT_REDEFINED_OP_FLAG |
6488 NIL_REDEFINED_OP_FLAG |
6489 TRUE_REDEFINED_OP_FLAG |
6490 FALSE_REDEFINED_OP_FLAG |
6491 STRING_REDEFINED_OP_FLAG)) {
6492 st_data_t val;
6493 if (RB_FLOAT_TYPE_P(key)) {
6494 double kval = RFLOAT_VALUE(key);
6495 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6496 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6497 }
6498 }
6499 if (rb_hash_stlike_lookup(hash, key, &val)) {
6500 return FIX2LONG((VALUE)val);
6501 }
6502 else {
6503 return else_offset;
6504 }
6505 }
6506 }
6507 return 0;
6508}
6509
6510NORETURN(static void
6511 vm_stack_consistency_error(const rb_execution_context_t *ec,
6512 const rb_control_frame_t *,
6513 const VALUE *));
6514static void
6515vm_stack_consistency_error(const rb_execution_context_t *ec,
6516 const rb_control_frame_t *cfp,
6517 const VALUE *bp)
6518{
6519 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6520 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6521 static const char stack_consistency_error[] =
6522 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6523#if defined RUBY_DEVEL
6524 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6525 rb_str_cat_cstr(mesg, "\n");
6526 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6527 rb_bug("%s", StringValueCStr(mesg));
6528#else
6529 rb_bug(stack_consistency_error, nsp, nbp);
6530#endif
6531}
6532
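/* Editor's note: the vm_opt_* helpers below share one shape: try Fixnum
 * pairs, then flonum pairs, then boxed Float (and, where meaningful, String
 * or Array) receivers, each guarded by BASIC_OP_UNREDEFINED_P so that
 * monkeypatching a core operator disables its fast path. Qundef always
 * means "fall back to a regular method call"; e.g. after
 * `class Integer; def +(o) = 0; end` every Integer#+ takes the generic path. */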
6533static VALUE
6534vm_opt_plus(VALUE recv, VALUE obj)
6535{
6536 if (FIXNUM_2_P(recv, obj) &&
6537 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6538 return rb_fix_plus_fix(recv, obj);
6539 }
6540 else if (FLONUM_2_P(recv, obj) &&
6541 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6542 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6543 }
6544 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6545 return Qundef;
6546 }
6547 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6548 RBASIC_CLASS(obj) == rb_cFloat &&
6549 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6550 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6551 }
6552 else if (RBASIC_CLASS(recv) == rb_cString &&
6553 RBASIC_CLASS(obj) == rb_cString &&
6554 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6555 return rb_str_opt_plus(recv, obj);
6556 }
6557 else if (RBASIC_CLASS(recv) == rb_cArray &&
6558 RBASIC_CLASS(obj) == rb_cArray &&
6559 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6560 return rb_ary_plus(recv, obj);
6561 }
6562 else {
6563 return Qundef;
6564 }
6565}
6566
6567static VALUE
6568vm_opt_minus(VALUE recv, VALUE obj)
6569{
6570 if (FIXNUM_2_P(recv, obj) &&
6571 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6572 return rb_fix_minus_fix(recv, obj);
6573 }
6574 else if (FLONUM_2_P(recv, obj) &&
6575 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6576 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6577 }
6578 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6579 return Qundef;
6580 }
6581 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6582 RBASIC_CLASS(obj) == rb_cFloat &&
6583 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6584 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6585 }
6586 else {
6587 return Qundef;
6588 }
6589}
6590
6591static VALUE
6592vm_opt_mult(VALUE recv, VALUE obj)
6593{
6594 if (FIXNUM_2_P(recv, obj) &&
6595 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6596 return rb_fix_mul_fix(recv, obj);
6597 }
6598 else if (FLONUM_2_P(recv, obj) &&
6599 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6600 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6601 }
6602 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6603 return Qundef;
6604 }
6605 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6606 RBASIC_CLASS(obj) == rb_cFloat &&
6607 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6608 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6609 }
6610 else {
6611 return Qundef;
6612 }
6613}
6614
6615static VALUE
6616vm_opt_div(VALUE recv, VALUE obj)
6617{
6618 if (FIXNUM_2_P(recv, obj) &&
6619 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6620 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6621 }
6622 else if (FLONUM_2_P(recv, obj) &&
6623 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6624 return rb_flo_div_flo(recv, obj);
6625 }
6626 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6627 return Qundef;
6628 }
6629 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6630 RBASIC_CLASS(obj) == rb_cFloat &&
6631 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6632 return rb_flo_div_flo(recv, obj);
6633 }
6634 else {
6635 return Qundef;
6636 }
6637}
6638
6639static VALUE
6640vm_opt_mod(VALUE recv, VALUE obj)
6641{
6642 if (FIXNUM_2_P(recv, obj) &&
6643 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6644 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6645 }
6646 else if (FLONUM_2_P(recv, obj) &&
6647 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6648 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6649 }
6650 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6651 return Qundef;
6652 }
6653 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6654 RBASIC_CLASS(obj) == rb_cFloat &&
6655 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6656 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6657 }
6658 else {
6659 return Qundef;
6660 }
6661}
6662
6663static VALUE
6664vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6665{
6666 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6667 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6668
6669 if (!UNDEF_P(val)) {
6670 return RBOOL(!RTEST(val));
6671 }
6672 }
6673
6674 return Qundef;
6675}
6676
6677static VALUE
6678vm_opt_lt(VALUE recv, VALUE obj)
6679{
6680 if (FIXNUM_2_P(recv, obj) &&
6681 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6682 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6683 }
6684 else if (FLONUM_2_P(recv, obj) &&
6685 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6686 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6687 }
6688 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6689 return Qundef;
6690 }
6691 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6692 RBASIC_CLASS(obj) == rb_cFloat &&
6693 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6694 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6695 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6696 }
6697 else {
6698 return Qundef;
6699 }
6700}
6701
6702static VALUE
6703vm_opt_le(VALUE recv, VALUE obj)
6704{
6705 if (FIXNUM_2_P(recv, obj) &&
6706 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6707 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6708 }
6709 else if (FLONUM_2_P(recv, obj) &&
6710 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6711 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6712 }
6713 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6714 return Qundef;
6715 }
6716 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6717 RBASIC_CLASS(obj) == rb_cFloat &&
6718 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6719 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6720 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6721 }
6722 else {
6723 return Qundef;
6724 }
6725}
6726
6727static VALUE
6728vm_opt_gt(VALUE recv, VALUE obj)
6729{
6730 if (FIXNUM_2_P(recv, obj) &&
6731 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6732 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6733 }
6734 else if (FLONUM_2_P(recv, obj) &&
6735 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6736 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6737 }
6738 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6739 return Qundef;
6740 }
6741 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6742 RBASIC_CLASS(obj) == rb_cFloat &&
6743 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6744 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6745 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6746 }
6747 else {
6748 return Qundef;
6749 }
6750}
6751
6752static VALUE
6753vm_opt_ge(VALUE recv, VALUE obj)
6754{
6755 if (FIXNUM_2_P(recv, obj) &&
6756 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6757 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6758 }
6759 else if (FLONUM_2_P(recv, obj) &&
6760 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6761 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6762 }
6763 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6764 return Qundef;
6765 }
6766 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6767 RBASIC_CLASS(obj) == rb_cFloat &&
6768 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6769 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6770 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6771 }
6772 else {
6773 return Qundef;
6774 }
6775}
6776
6777
6778static VALUE
6779vm_opt_ltlt(VALUE recv, VALUE obj)
6780{
6781 if (SPECIAL_CONST_P(recv)) {
6782 return Qundef;
6783 }
6784 else if (RBASIC_CLASS(recv) == rb_cString &&
6785 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6786 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6787 return rb_str_buf_append(recv, obj);
6788 }
6789 else {
6790 return rb_str_concat(recv, obj);
6791 }
6792 }
6793 else if (RBASIC_CLASS(recv) == rb_cArray &&
6794 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6795 return rb_ary_push(recv, obj);
6796 }
6797 else {
6798 return Qundef;
6799 }
6800}
6801
6802static VALUE
6803vm_opt_and(VALUE recv, VALUE obj)
6804{
6805 // If recv and obj are both fixnums, then the bottom tag bit
6806 // will be 1 on both. 1 & 1 == 1, so the result value will also
6807 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6808 // will be 0, and we return Qundef.
6809 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6810
6811 if (FIXNUM_P(ret) &&
6812 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6813 return ret;
6814 }
6815 else {
6816 return Qundef;
6817 }
6818}
6819
6820static VALUE
6821vm_opt_or(VALUE recv, VALUE obj)
6822{
6823 if (FIXNUM_2_P(recv, obj) &&
6824 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6825 return recv | obj;
6826 }
6827 else {
6828 return Qundef;
6829 }
6830}
6831
6832static VALUE
6833vm_opt_aref(VALUE recv, VALUE obj)
6834{
6835 if (SPECIAL_CONST_P(recv)) {
6836 if (FIXNUM_2_P(recv, obj) &&
6837 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6838 return rb_fix_aref(recv, obj);
6839 }
6840 return Qundef;
6841 }
6842 else if (RBASIC_CLASS(recv) == rb_cArray &&
6843 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6844 if (FIXNUM_P(obj)) {
6845 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6846 }
6847 else {
6848 return rb_ary_aref1(recv, obj);
6849 }
6850 }
6851 else if (RBASIC_CLASS(recv) == rb_cHash &&
6852 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6853 return rb_hash_aref(recv, obj);
6854 }
6855 else {
6856 return Qundef;
6857 }
6858}
6859
6860static VALUE
6861vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6862{
6863 if (SPECIAL_CONST_P(recv)) {
6864 return Qundef;
6865 }
6866 else if (RBASIC_CLASS(recv) == rb_cArray &&
6867 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6868 FIXNUM_P(obj)) {
6869 rb_ary_store(recv, FIX2LONG(obj), set);
6870 return set;
6871 }
6872 else if (RBASIC_CLASS(recv) == rb_cHash &&
6873 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6874 rb_hash_aset(recv, obj, set);
6875 return set;
6876 }
6877 else {
6878 return Qundef;
6879 }
6880}
6881
6882static VALUE
6883vm_opt_aref_with(VALUE recv, VALUE key)
6884{
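 /* Editor's note: opt_aref_with passes a frozen string literal key without
  * the dup that plain `h["k"]` code would allocate. compare_by_identity
  * hashes are excluded (a fresh string never matches by identity, but the
  * deduplicated literal might), as are hashes with a default proc, which
  * would observe the shared frozen key instead of a fresh mutable string. */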
6885 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6886 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6887 rb_hash_compare_by_id_p(recv) == Qfalse &&
6888 !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6889 return rb_hash_aref(recv, key);
6890 }
6891 else {
6892 return Qundef;
6893 }
6894}
6895
6896VALUE
6897rb_vm_opt_aref_with(VALUE recv, VALUE key)
6898{
6899 return vm_opt_aref_with(recv, key);
6900}
6901
6902static VALUE
6903vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6904{
6905 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6906 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6907 rb_hash_compare_by_id_p(recv) == Qfalse) {
6908 return rb_hash_aset(recv, key, val);
6909 }
6910 else {
6911 return Qundef;
6912 }
6913}
6914
6915static VALUE
6916vm_opt_length(VALUE recv, int bop)
6917{
6918 if (SPECIAL_CONST_P(recv)) {
6919 return Qundef;
6920 }
6921 else if (RBASIC_CLASS(recv) == rb_cString &&
6922 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
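 /* For BOP_EMPTY_P only zero vs. non-zero matters, so the byte length
  * suffices; rb_str_length() would count multibyte characters. */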
6923 if (bop == BOP_EMPTY_P) {
6924 return LONG2NUM(RSTRING_LEN(recv));
6925 }
6926 else {
6927 return rb_str_length(recv);
6928 }
6929 }
6930 else if (RBASIC_CLASS(recv) == rb_cArray &&
6931 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6932 return LONG2NUM(RARRAY_LEN(recv));
6933 }
6934 else if (RBASIC_CLASS(recv) == rb_cHash &&
6935 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6936 return INT2FIX(RHASH_SIZE(recv));
6937 }
6938 else {
6939 return Qundef;
6940 }
6941}
6942
6943static VALUE
6944vm_opt_empty_p(VALUE recv)
6945{
6946 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6947 case Qundef: return Qundef;
6948 case INT2FIX(0): return Qtrue;
6949 default: return Qfalse;
6950 }
6951}
6952
6953VALUE rb_false(VALUE obj);
6954
6955static VALUE
6956vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6957{
6958 if (NIL_P(recv) &&
6959 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6960 return Qtrue;
6961 }
6962 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6963 return Qfalse;
6964 }
6965 else {
6966 return Qundef;
6967 }
6968}
6969
6970static VALUE
6971fix_succ(VALUE x)
6972{
6973 switch (x) {
6974 case ~0UL:
6975 /* 0xFFFF_FFFF == INT2FIX(-1)
6976 * `-1.succ` is of course 0. */
6977 return INT2FIX(0);
6978 case RSHIFT(~0UL, 1):
6979 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6980 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6981 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6982 default:
6983 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6984 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6985 * == lx*2 + ly*2 + 1
6986 * == (lx*2+1) + (ly*2+1) - 1
6987 * == x + y - 1
6988 *
6989 * Here, if we put y := INT2FIX(1):
6990 *
6991 * == x + INT2FIX(1) - 1
6992 * == x + 2 .
6993 */
6994 return x + 2;
6995 }
6996}
6997
6998static VALUE
6999vm_opt_succ(VALUE recv)
7000{
7001 if (FIXNUM_P(recv) &&
7002 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7003 return fix_succ(recv);
7004 }
7005 else if (SPECIAL_CONST_P(recv)) {
7006 return Qundef;
7007 }
7008 else if (RBASIC_CLASS(recv) == rb_cString &&
7009 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7010 return rb_str_succ(recv);
7011 }
7012 else {
7013 return Qundef;
7014 }
7015}
7016
7017static VALUE
7018vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7019{
7020 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7021 return RBOOL(!RTEST(recv));
7022 }
7023 else {
7024 return Qundef;
7025 }
7026}
7027
7028static VALUE
7029vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7030{
7031 if (SPECIAL_CONST_P(recv)) {
7032 return Qundef;
7033 }
7034 else if (RBASIC_CLASS(recv) == rb_cString &&
7035 CLASS_OF(obj) == rb_cRegexp &&
7036 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7037 return rb_reg_match(obj, recv);
7038 }
7039 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7040 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7041 return rb_reg_match(recv, obj);
7042 }
7043 else {
7044 return Qundef;
7045 }
7046}
7047
7048rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7049
7050NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7051
7052static inline void
7053vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7054 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7055 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7056{
7057 rb_event_flag_t event = pc_events & target_event;
7058 VALUE self = GET_SELF();
7059
7060 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7061
7062 if (event & global_hooks->events) {
7063 /* increment PC because source line is calculated with PC-1 */
7064 reg_cfp->pc++;
7065 vm_dtrace(event, ec);
7066 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
7067 reg_cfp->pc--;
7068 }
7069
7070 // Load here since global hook above can add and free local hooks
7071 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7072 if (local_hooks != NULL) {
7073 if (event & local_hooks->events) {
7074 /* increment PC because source line is calculated with PC-1 */
7075 reg_cfp->pc++;
7076 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
7077 reg_cfp->pc--;
7078 }
7079 }
7080}
7081
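/* Editor's note: fires `target_event` only when it is both raised at this
 * PC (pc_events) and enabled by some hook (enabled_flags); vm_trace_hook()
 * then re-checks the global and iseq-local hook lists individually. */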
7082#define VM_TRACE_HOOK(target_event, val) do { \
7083 if ((pc_events & (target_event)) & enabled_flags) { \
7084 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7085 } \
7086} while (0)
7087
7088static VALUE
7089rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7090{
7091 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7092 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7093 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7094}
7095
7096static void
7097vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7098{
7099 const VALUE *pc = reg_cfp->pc;
7100 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7101 rb_event_flag_t global_events = enabled_flags;
7102
7103 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7104 return;
7105 }
7106 else {
7107 const rb_iseq_t *iseq = reg_cfp->iseq;
7108 VALUE iseq_val = (VALUE)iseq;
7109 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7110 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7111 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7112 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7113 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7114 rb_hook_list_t *bmethod_local_hooks = NULL;
7115 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7116 rb_event_flag_t bmethod_local_events = 0;
7117 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7118 enabled_flags |= iseq_local_events;
7119
7120 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7121
7122 if (bmethod_frame) {
7123 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7124 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7125 bmethod_local_hooks = me->def->body.bmethod.hooks;
7126 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7127 if (bmethod_local_hooks) {
7128 bmethod_local_events = bmethod_local_hooks->events;
7129 }
7130 }
7131
7132
7133 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7134#if 0
7135 /* disable trace */
7136 /* TODO: incomplete */
7137 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7138#else
7139 /* do not disable trace because of performance problem
7140 * (re-enable overhead)
7141 */
7142#endif
7143 return;
7144 }
7145 else if (ec->trace_arg != NULL) {
7146 /* already tracing */
7147 return;
7148 }
7149 else {
7150 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7151 /* Note, not considering iseq local events here since the same
7152 * iseq could be used in multiple bmethods. */
7153 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7154
7155 if (0) {
7156 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7157 (int)pos,
7158 (int)pc_events,
7159 RSTRING_PTR(rb_iseq_path(iseq)),
7160 (int)rb_iseq_line_no(iseq, pos),
7161 RSTRING_PTR(rb_iseq_label(iseq)));
7162 }
7163 VM_ASSERT(reg_cfp->pc == pc);
7164 VM_ASSERT(pc_events != 0);
7165
7166 /* check traces */
7167 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7168 /* b_call instruction running as a method. Fire call event. */
7169 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7170 }
7171 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7172 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7173 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7174 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7175 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7176 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7177 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7178 /* b_return instruction running as a method. Fire return event. */
7179 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7180 }
7181
7182 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7183 // We need the pointer to stay valid in case compaction happens in a trace hook.
7184 //
7185 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7186 // storage for `rb_method_definition_t` is not on the GC heap.
7187 RB_GC_GUARD(iseq_val);
7188 }
7189 }
7190}
7191#undef VM_TRACE_HOOK
7192
7193#if VM_CHECK_MODE > 0
7194NORETURN( NOINLINE( COLDFUNC
7195void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7196
7197void
7198Init_vm_stack_canary(void)
7199{
7200 /* This has to be called _after_ our PRNG is properly set up. */
7201 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7202 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7203
7204 vm_stack_canary_was_born = true;
7205 VM_ASSERT(n == 0);
7206}
7207
7208void
7209rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7210{
7211 /* Because a method has already been called, why not call
7212 * another one. */
7213 const char *insn = rb_insns_name(i);
7214 VALUE inspection = rb_inspect(c);
7215 const char *str = StringValueCStr(inspection);
7216
7217 rb_bug("dead canary found at %s: %s", insn, str);
7218}
7219
7220#else
7221void Init_vm_stack_canary(void) { /* nothing to do */ }
7222#endif
7223
7224
7225/* a part of the following code is generated by this ruby script:
7226
722716.times{|i|
7228 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7229 typedef_args.prepend(", ") if i != 0
7230 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7231 call_args.prepend(", ") if i != 0
7232 puts %Q{
7233static VALUE
7234builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7235{
7236 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7237 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7238}}
7239}
7240
7241puts
7242puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
724316.times{|i|
7244 puts " builtin_invoker#{i},"
7245}
7246puts "};"
7247*/
7248
7249static VALUE
7250builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7251{
7252 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7253 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7254}
7255
7256static VALUE
7257builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7258{
7259 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7260 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7261}
7262
7263static VALUE
7264builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7265{
7266 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7267 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7268}
7269
7270static VALUE
7271builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7272{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

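/*
 * C offers no portable way to call a function pointer with an argument list
 * whose length is only known at run time, so each arity from 0 through 15
 * gets a hand-written trampoline above: builtin_invokerN casts the
 * type-erased funcptr back to its N-argument signature and spreads argv
 * into positional arguments.
 */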
typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}

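/*
 * Illustrative sketch (hypothetical names, not part of this file): a builtin
 * implemented as a two-argument C function travels through the table like
 * so. The table is indexed directly, so argc is expected to be in 0..15.
 *
 *   VALUE my_builtin(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b);
 *
 *   builtin_invoker inv = lookup_builtin_invoker(2);  // == builtin_invoker2
 *   VALUE ret = inv(ec, self, argv, (rb_insn_func_t)(uintptr_t)my_builtin);
 */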
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

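/*
 * The delegate form below passes a window of the current frame's locals to
 * the builtin without copying. Locals are stored just below the
 * VM_ENV_DATA_SIZE environment slots that end at the EP, so the first local
 * sits at ep - local_table_size - VM_ENV_DATA_SIZE + 1; adding start_index
 * skips the leading locals that the builtin does not take.
 */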
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

// for __builtin_inline!()

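/*
 * __builtin_inline!() embeds C expressions into an iseq; the embedded code
 * uses this helper to read a Ruby local variable through its EP-relative
 * index in the current frame.
 */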
VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}