/* Ruby 4.1.0dev (2026-05-01 revision 1546943ea622a07269abc75ec98706e8ba73386f)
 * vm_insnhelper.c (1546943ea622a07269abc75ec98706e8ba73386f) */
/**********************************************************************

  vm_insnhelper.c - instruction helper functions. Included into vm.c.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
51ruby_vm_special_exception_copy(VALUE exc)
52{
54 rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
/* Raise the pre-allocated SystemStackError on `ec`.  When `setup` is
 * true, copy the shared template exception and attach the current
 * backtrace; otherwise raise the bare template (used when the stack is
 * too exhausted to safely build a backtrace).  Never returns. */
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW; /* mark before jumping so raise machinery knows */
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
73
74NORETURN(static void vm_stackoverflow(void));
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
/* Public entry for reporting a machine-stack overflow on `ec`.
 * `crit` escalates the response: fatal levels raise the bare pre-built
 * exception without touching the (possibly corrupted) stack further;
 * lower levels raise a normal SystemStackError, with a backtrace only
 * when we are not already inside signal handling. */
void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        /* overflowing during GC means C code recursed too deep inside GC */
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    /* setup (backtrace) only when below the signal-critical level */
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
/* Debug-only check: is `klass` acceptable as a defined_class of a
 * callable method entry?  At VM_CHECK_MODE >= 2 this walks the
 * superclass chain; otherwise it only rejects 0. */
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
        /* fall through: an iclass whose super is a module is fine */
      case T_MODULE:
        return TRUE;
    }
    /* otherwise require BasicObject somewhere up the ancestry */
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
/* Debug-only validation of one frame-push.  Given the expected layout
 * for a frame of magic `type` (whether a block handler, method entry,
 * or cref is required, and whether it is a C frame), verify that
 * `specval`/`cref_or_me`/`iseq` are consistent; rb_bug() on violation. */
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        /* bmethod frames always carry a method entry */
        req_me = TRUE;
    }

    /* a local env means specval holds a block handler, not a prev-ep */
    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                /* lambda/ifunc/dummy frames are allowed to carry a ment instead */
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) //argument error
                  );
    }
    else {
        /* C frames have no normal iseq; Ruby frames must have one */
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}
201
/* Debug-only dispatch: route a frame-push check to vm_check_frame_detail
 * with the per-magic expectations encoded in the CHECK table below. */
static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
252
/* Debug-only leafness check: if the VM stack slot just above `sp` still
 * holds the canary value placed there before a supposedly-leaf insn,
 * nothing clobbered it — but if the canary is *gone* from sp[0] we are
 * about to overwrite it, meaning a "leaf" instruction pushed to the
 * stack.  Dumps the offending iseq/insn and aborts. */
void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch?  */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    /* gather human-readable context before aborting */
    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting this large contents.  It
       is designed to run form a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
305
306#if USE_DEBUG_COUNTER
307static void
308vm_push_frame_debug_counter_inc(
309 const struct rb_execution_context_struct *ec,
310 const struct rb_control_frame_struct *reg_cfp,
311 VALUE type)
312{
313 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
314
315 RB_DEBUG_COUNTER_INC(frame_push);
316
317 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
318 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
319 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
320 if (prev) {
321 if (curr) {
322 RB_DEBUG_COUNTER_INC(frame_R2R);
323 }
324 else {
325 RB_DEBUG_COUNTER_INC(frame_R2C);
326 }
327 }
328 else {
329 if (curr) {
330 RB_DEBUG_COUNTER_INC(frame_C2R);
331 }
332 else {
333 RB_DEBUG_COUNTER_INC(frame_C2C);
334 }
335 }
336 }
337
338 switch (type & VM_FRAME_MAGIC_MASK) {
339 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
340 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
341 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
342 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
343 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
344 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
345 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
346 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
347 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
348 }
349
350 rb_bug("unreachable");
351}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
// Return a poison value to be set above the stack top to verify leafness.
// Returns 0 when stack checking is compiled out (VM_CHECK_MODE == 0).
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
/* Push a new control frame onto `ec`'s VM stack.
 *
 *   iseq        - instruction sequence to run (NULL for some C frames)
 *   type        - frame magic | env flags (see VM_FRAME_MAGIC_*)
 *   self        - receiver for the frame
 *   specval     - block handler (local env) or previous ep
 *   cref_or_me  - Qfalse, imemo_cref, or imemo_ment for ep[-2]
 *   pc          - initial program counter
 *   sp          - current stack pointer; locals and env data are laid
 *                 out upward from here
 *   local_size  - number of locals to nil-initialize
 *   stack_max   - worst-case operand stack depth, for overflow check
 */
static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        ._iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    /* read the flags before the frame (and its ep) is popped */
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    /* nonzero (the raw flag bit) iff this was a FINISH frame */
    return flags & VM_FRAME_FLAG_FINISH;
}
457
458void
459rb_vm_pop_frame(rb_execution_context_t *ec)
460{
461 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
462}
463
// it pushes pseudo-frame with fname filename.
// Returns the dummy iseq (as a VALUE) so the caller can keep it alive /
// identify the frame; the frame itself carries no locals, pc, or stack.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname); /* forward decl (defined in iseq.c) */
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
502
/* Raise the ArgumentError built by rb_arity_error_new(). Never returns. */
void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}
508
509/* lvar */
510
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

/* Slow path for writing into an escaped (heap) env: remember the env
 * for the GC write barrier first, then force the write and clear the
 * WB-required flag.  Order matters for GC correctness. */
static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcely */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
535
/* Public wrapper for vm_env_write() (used outside this file). */
void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
541
/* Convert a block handler into a Proc VALUE (or nil when there is no
 * block).  iseq/ifunc handlers are wrapped into a new Proc; symbols go
 * through Symbol#to_proc; proc handlers are returned as-is. */
VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
562
563/* svar */
564
565#if VM_CHECK_MODE > 0
566static int
567vm_svar_valid_p(VALUE svar)
568{
569 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
570 switch (imemo_type(svar)) {
571 case imemo_svar:
572 case imemo_cref:
573 case imemo_ment:
574 return TRUE;
575 default:
576 break;
577 }
578 }
579 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
580 return FALSE;
581}
582#endif
583
/* Fetch the special-variable slot for local ep `lep`.  For the root
 * local env (or when `lep` is NULL) the per-EC root_svar is used; the
 * result may be Qfalse, an svar, a cref, or a ment (see ep[-2]). */
static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        /* NOTE: ec may be NULL only when lep is non-root (checked above) */
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}
600
/* Store `svar` into the special-variable slot for local ep `lep`,
 * going through the env write barrier (non-root) or an RB_OBJ_WRITE on
 * the thread (root), so GC sees the reference. */
static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}
613
614static VALUE
615lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
616{
617 const struct vm_svar *svar = lep_svar(ec, lep);
618
619 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
620
621 switch (key) {
622 case VM_SVAR_LASTLINE:
623 return svar->lastline;
624 case VM_SVAR_BACKREF:
625 return svar->backref;
626 default: {
627 const VALUE ary = svar->others;
628
629 if (NIL_P(ary)) {
630 return Qnil;
631 }
632 else {
633 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
634 }
635 }
636 }
637}
638
/* Allocate a fresh imemo_svar with all fields nil.  The casts strip
 * the const qualifier on the fields for this one-time initialization
 * (no write barrier needed: the values are immediates). */
static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}
649
/* Set special variable `key` to `val` for local ep `lep`, creating the
 * svar (preserving any cref/ment previously in the slot) on demand. */
static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        /* the old slot value (Qfalse/cref/ment) becomes svar->cref_or_me */
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
676
677static inline VALUE
678vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
679{
680 VALUE val;
681
682 if (type == 0) {
683 val = lep_svar_get(ec, lep, key);
684 }
685 else {
686 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
687
688 if (type & 0x01) {
689 switch (type >> 1) {
690 case '&':
691 val = rb_reg_last_match(backref);
692 break;
693 case '`':
694 val = rb_reg_match_pre(backref);
695 break;
696 case '\'':
697 val = rb_reg_match_post(backref);
698 break;
699 case '+':
700 val = rb_reg_match_last(backref);
701 break;
702 default:
703 rb_bug("unexpected back-ref");
704 }
705 }
706 else {
707 val = rb_reg_nth_match((int)(type >> 1), backref);
708 }
709 }
710 return val;
711}
712
713static inline VALUE
714vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
715{
716 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
717 int nth = 0;
718
719 if (type & 0x01) {
720 switch (type >> 1) {
721 case '&':
722 case '`':
723 case '\'':
724 break;
725 case '+':
726 return rb_reg_last_defined(backref);
727 default:
728 rb_bug("unexpected back-ref");
729 }
730 }
731 else {
732 nth = (int)(type >> 1);
733 }
734 return rb_reg_nth_defined(nth, backref);
735}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758 rb_bug("check_method_entry: svar should not be there:");
759#endif
760 return NULL;
761 }
762}
763
765env_method_entry_unchecked(VALUE obj, int can_be_svar)
766{
767 if (obj == Qfalse) return NULL;
768
769 switch (imemo_type(obj)) {
770 case imemo_ment:
771 return (rb_callable_method_entry_t *)obj;
772 case imemo_cref:
773 return NULL;
774 case imemo_svar:
775 if (can_be_svar) {
776 return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
777 }
778 default:
779 return NULL;
780 }
781}
782
784rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
785{
786 const VALUE *ep = cfp->ep;
788
789 while (!VM_ENV_LOCAL_P(ep)) {
790 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
791 ep = VM_ENV_PREV_EP(ep);
792 }
793
794 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
795}
796
798rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
799{
800 const VALUE *ep = cfp->ep;
802
803 while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
804 if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
805 ep = VM_ENV_PREV_EP_UNCHECKED(ep);
806 }
807
808 return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
809}
810
811static const rb_iseq_t *
812method_entry_iseqptr(const rb_callable_method_entry_t *me)
813{
814 switch (me->def->type) {
815 case VM_METHOD_TYPE_ISEQ:
816 return me->def->body.iseq.iseqptr;
817 default:
818 return NULL;
819 }
820}
821
822static rb_cref_t *
823method_entry_cref(const rb_callable_method_entry_t *me)
824{
825 switch (me->def->type) {
826 case VM_METHOD_TYPE_ISEQ:
827 return me->def->body.iseq.cref;
828 default:
829 return NULL;
830 }
831}
832
833#if VM_CHECK_MODE == 0
834PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
835#endif
836static rb_cref_t *
837check_cref(VALUE obj, int can_be_svar)
838{
839 if (obj == Qfalse) return NULL;
840
841#if VM_CHECK_MODE > 0
842 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
843#endif
844
845 switch (imemo_type(obj)) {
846 case imemo_ment:
847 return method_entry_cref((rb_callable_method_entry_t *)obj);
848 case imemo_cref:
849 return (rb_cref_t *)obj;
850 case imemo_svar:
851 if (can_be_svar) {
852 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
853 }
854 default:
855#if VM_CHECK_MODE > 0
856 rb_bug("check_method_entry: svar should not be there:");
857#endif
858 return NULL;
859 }
860}
861
862static inline rb_cref_t *
863vm_env_cref(const VALUE *ep)
864{
865 rb_cref_t *cref;
866
867 while (!VM_ENV_LOCAL_P(ep)) {
868 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
869 ep = VM_ENV_PREV_EP(ep);
870 }
871
872 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
873}
874
875static int
876is_cref(const VALUE v, int can_be_svar)
877{
878 if (RB_TYPE_P(v, T_IMEMO)) {
879 switch (imemo_type(v)) {
880 case imemo_cref:
881 return TRUE;
882 case imemo_svar:
883 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
884 default:
885 break;
886 }
887 }
888 return FALSE;
889}
890
891static int
892vm_env_cref_by_cref(const VALUE *ep)
893{
894 while (!VM_ENV_LOCAL_P(ep)) {
895 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
896 ep = VM_ENV_PREV_EP(ep);
897 }
898 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
899}
900
/* If *vptr holds a cref, replace it in place with a duplicate and
 * return the duplicate; descend (once) into an svar's cref_or_me slot.
 * `parent` is the owning object for the GC write barrier, or 0 when
 * the slot lives on the machine/VM stack (force-write instead). */
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                /* heap-owned slot: barriered write */
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                /* stack slot: no barrier needed */
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}
932
/* Walk the env chain of `ep`, duplicate the first cref found, store
 * the duplicate back into its slot, and return it.  The env chain must
 * contain a cref-by-cref (asserted via vm_env_cref_by_cref). */
static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            /* escaped envs own their slots; pass them as WB parent */
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}
954
955static rb_cref_t *
956vm_get_cref(const VALUE *ep)
957{
958 rb_cref_t *cref = vm_env_cref(ep);
959
960 if (cref != NULL) {
961 return cref;
962 }
963 else {
964 rb_bug("vm_get_cref: unreachable");
965 }
966}
967
/* Public wrapper for vm_get_cref() (used outside this file). */
rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}
973
974static rb_cref_t *
975vm_ec_cref(const rb_execution_context_t *ec)
976{
977 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
978
979 if (cfp == NULL) {
980 return NULL;
981 }
982 return vm_get_cref(cfp->ep);
983}
984
985static const rb_cref_t *
986vm_get_const_key_cref(const VALUE *ep)
987{
988 const rb_cref_t *cref = vm_get_cref(ep);
989 const rb_cref_t *key_cref = cref;
990
991 while (cref) {
992 if (CREF_DYNAMIC(cref) ||
993 RCLASS_CLONED_P(CREF_CLASS(cref))) {
994 return key_cref;
995 }
996 cref = CREF_NEXT(cref);
997 }
998
999 /* no dynamic singleton class or cloned class found */
1000 return NULL;
1001}
1002
/* Rebuild the cref chain, substituting `new_klass` for the first link
 * whose class is `old_klass`; links before the match are shallow-copied
 * (sharing each original's prev), and the chain is returned.  If no
 * link matches, a full copy of the chain is returned. */
rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    /* append `new_cref` to the copied list (barriered link write) */
    #define ADD_NEW_CREF(new_cref) \
        if (new_cref_tail) { \
            RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
        } \
        else { \
            new_cref_head = new_cref; \
        } \
        new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            /* substitute and stop: the rest of the chain is shared */
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}
1035
1036static rb_cref_t *
1037vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1038{
1039 rb_cref_t *prev_cref = NULL;
1040
1041 if (ep) {
1042 prev_cref = vm_env_cref(ep);
1043 }
1044 else {
1045 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1046
1047 if (cfp) {
1048 prev_cref = vm_env_cref(cfp->ep);
1049 }
1050 }
1051
1052 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1053}
1054
1055static inline VALUE
1056vm_get_cbase(const VALUE *ep)
1057{
1058 const rb_cref_t *cref = vm_get_cref(ep);
1059
1060 return CREF_CLASS_FOR_DEFINITION(cref);
1061}
1062
1063static inline VALUE
1064vm_get_const_base(const VALUE *ep)
1065{
1066 const rb_cref_t *cref = vm_get_cref(ep);
1067
1068 while (cref) {
1069 if (!CREF_PUSHED_BY_EVAL(cref)) {
1070 return CREF_CLASS_FOR_DEFINITION(cref);
1071 }
1072 cref = CREF_NEXT(cref);
1073 }
1074
1075 return Qundef;
1076}
1077
1078static inline void
1079vm_check_if_namespace(VALUE klass)
1080{
1081 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1082 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1083 }
1084}
1085
/* Warn when defining inside a refinement module: the definition lands
 * on the outer class/module, which is usually not what was intended. */
static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}
1093
/* Currently the identity function; presumably kept as a hook for
 * mapping a cref class to the frame-visible iclass — confirm intent
 * against callers before removing. */
static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}
1099
/* Constant lookup for the VM.
 *
 * When `orig_klass` is nil and `allow_nil` is set, search the current
 * lexical scope (cref chain), triggering autoload as needed, then fall
 * back to the self class hierarchy.  Otherwise look up a qualified
 * constant (Foo::BAR) under `orig_klass`, which must be a namespace.
 * With `is_defined` nonzero, return a truthy flag instead of the value
 * (used by `defined?`).  Non-main ractors may only read shareable
 * constant values. */
static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id); /* forward decl */
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        /* skip scopes pushed by eval at the root */
        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        /* undefined entry: an autoload stub */
                        if (am == klass) break; /* already tried this klass */
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%"PRIsVALUE" by non-main ractor.", rb_class_path(klass), rb_id2str(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        /* qualified lookup: orig_klass::id */
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
1183
1184VALUE
1185rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1186{
1187 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1188}
1189
1190static inline VALUE
1191vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1192{
1193 VALUE val = Qnil;
1194 int idx = 0;
1195 int allow_nil = TRUE;
1196 if (segments[0] == idNULL) {
1197 val = rb_cObject;
1198 idx++;
1199 allow_nil = FALSE;
1200 }
1201 while (segments[idx]) {
1202 ID id = segments[idx++];
1203 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1204 allow_nil = FALSE;
1205 }
1206 return val;
1207}
1208
1209
/* Find the class/module that a class-variable access should target for the
 * given cref (lexical scope).  Scopes that cannot own a class variable —
 * nil classes, singleton classes, and scopes pushed by eval — are skipped.
 * Raises RuntimeError for toplevel access (when top_level_raise is set) and
 * TypeError when no class is available at all. */
static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    /* Walk outward while the current scope cannot own a class variable. */
    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    /* No enclosing scope left: we are at toplevel. */
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
1235
1236ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1237static inline void
1238fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1239{
1240 if (is_attr) {
1241 vm_cc_attr_index_set(cc, index, shape_id);
1242 }
1243 else {
1244 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1245 }
1246}
1247
1248#define ractor_incidental_shareable_p(cond, val) \
1249 (!(cond) || rb_ractor_shareable_p(val))
1250#define ractor_object_incidental_shareable_p(obj, val) \
1251 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1252
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
/* Read instance variable `id` from `obj` using the shape-based inline cache.
 * Exactly one of `ic` (getinstancevariable) or `cc` (attr_reader call) is
 * consulted, selected by `is_attr`.  Returns `default_value` when the ivar
 * is unset; callers pass Qundef as default_value for `defined?` queries. */
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    if (SPECIAL_CONST_P(obj)) {
        /* Immediates can never carry instance variables. */
        return default_value;
    }

    /* Locate the object that actually stores the ivar fields. */
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        /* generic objects keep their fields in an external imemo */
        fields_obj = rb_obj_fields(obj, id);
    }

    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    /* Fetch (shape, index) atomically from the relevant inline cache. */
    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        /* Cache hit: the cached index is valid for this shape. */
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            /* Too-complex shape: fields are stored in an st_table keyed by
             * ivar ID instead of a flat array. */
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                /* ivar not present on this shape: cache the negative result */
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    /* Uncached fallback (also used when OPT_IC_FOR_IVAR is disabled). */
    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

/* Generic slow-path ivar write: performs the write (possibly transitioning
 * the object's shape), then repopulates the inline cache so the next write
 * can take the fast path.  Raises FrozenError on frozen receivers. */
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    rb_check_frozen(obj);

    attr_index_t index = rb_ivar_set_index(obj, id, val);
    /* Re-read the shape: the write above may have transitioned it. */
    shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

    if (!rb_shape_too_complex_p(next_shape_id)) {
        populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
    return val;
#else
    return rb_ivar_set(obj, id, val);
#endif
}
1452
1453static VALUE
1454vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1455{
1456 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1457}
1458
1459static VALUE
1460vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1461{
1462 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1463}
1464
NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for T_CLASS/T_MODULE receivers, whose fields live in a
 * separate fields_obj.  Only the main ractor may set class ivars, so other
 * ractors bail out.  Returns the written value on success, or Qundef to
 * make the caller fall back to the generic slow path. */
static VALUE
vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    if (UNLIKELY(!rb_ractor_main_p())) {
        return Qundef;
    }

    VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
    if (UNLIKELY(!fields_obj)) {
        return Qundef;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        /* Cached one-step transition: valid only if dest is the direct child
         * reached by adding `id` and no capacity growth is required. */
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        /* Commit the shape transition on both the class and its fields_obj. */
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1507
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for generic (non-T_OBJECT, non-class) receivers whose
 * fields are kept in an external fields_obj.  Mirrors vm_setivar_class but
 * reads the shape from the receiver itself.  Returns Qundef to make the
 * caller fall back to the generic slow path. */
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        /* Cached one-step transition: valid only if dest is the direct child
         * reached by adding `id` and no capacity growth is required. */
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        /* Commit the shape transition on both the object and its fields_obj. */
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1543
1544static inline VALUE
1545vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1546{
1547#if OPT_IC_FOR_IVAR
1548 switch (BUILTIN_TYPE(obj)) {
1549 case T_OBJECT:
1550 {
1551 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1552
1553 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1554 RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));
1555
1556 if (LIKELY(shape_id == dest_shape_id)) {
1557 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1558 VM_ASSERT(!rb_ractor_shareable_p(obj));
1559 }
1560 else if (dest_shape_id != INVALID_SHAPE_ID) {
1561 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1562 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1563
1564 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1565
1566 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1567 }
1568 else {
1569 break;
1570 }
1571 }
1572 else {
1573 break;
1574 }
1575
1576 VALUE *ptr = ROBJECT_FIELDS(obj);
1577
1578 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1579 RB_OBJ_WRITE(obj, &ptr[index], val);
1580
1581 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1582 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1583 return val;
1584 }
1585 break;
1586 case T_CLASS:
1587 case T_MODULE:
1588 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1589 default:
1590 break;
1591 }
1592
1593 return Qundef;
1594#endif /* OPT_IC_FOR_IVAR */
1595}
1596
/* Look up class variable `id` starting at `klass`, then refresh the inline
 * cache `ic` to point at the per-class cvar cache entry of the defining
 * class.  Returns the cvar's value.  rb_bug()s if the defining class lacks
 * the expected cvc table/entry, since rb_cvar_find must have created them. */
static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    /* Normalize an include wrapper back to the real module. */
    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    VALUE rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_marked_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    /* Stamp the entry with the current global cvar generation and wire the
     * iseq's inline cache to it (with GC write barriers). */
    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    RB_OBJ_WRITE((VALUE)ent, &ent->cref, cref);
    RB_OBJ_WRITE(iseq, &ic->entry, ent);

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));

    return cvar_value;
}
1627
/* getclassvariable: read cvar `id` via the inline cache `ic`.  The cache is
 * valid only if the global cvar generation and the cref both match, and we
 * are on the main ractor; otherwise fall back to a full lookup and refresh
 * the cache. */
static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        /* Cache hit: the cvar is stored as an ivar on the cached class. */
        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}
1647
1648VALUE
1649rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1650{
1651 return vm_getclassvariable(iseq, cfp, id, ic);
1652}
1653
/* setclassvariable: write cvar `id` via the inline cache `ic`.  On a cache
 * hit (matching generation + cref, main ractor) write directly to the
 * cached class; otherwise do a full base lookup, write, and refresh the
 * cache. */
static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}
1673
/* Exported entry point for JIT/other callers: same as vm_setclassvariable. */
void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}
1679
1680ALWAYS_INLINE(static VALUE vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic));
1681static inline VALUE
1682vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1683{
1684 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1685}
1686
1687static inline void
1688vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1689{
1690 if (RB_SPECIAL_CONST_P(obj)) {
1692 return;
1693 }
1694
1695 shape_id_t dest_shape_id;
1696 attr_index_t index;
1697 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1698
1699 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1700 switch (BUILTIN_TYPE(obj)) {
1701 case T_OBJECT:
1702 break;
1703 case T_CLASS:
1704 case T_MODULE:
1705 if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
1706 return;
1707 }
1708 break;
1709 default:
1710 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1711 return;
1712 }
1713 }
1714 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1715 }
1716}
1717
/* Exported entry point for JIT/other callers: same as vm_setinstancevariable. */
void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}
1723
1724VALUE
1725rb_vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1726{
1727 return vm_getinstancevariable(iseq, obj, id, ic);
1728}
1729
1730static VALUE
1731vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1732{
1733 /* continue throw */
1734
1735 if (FIXNUM_P(err)) {
1736 ec->tag->state = RUBY_TAG_FATAL;
1737 }
1738 else if (SYMBOL_P(err)) {
1739 ec->tag->state = TAG_THROW;
1740 }
1741 else if (THROW_DATA_P(err)) {
1742 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1743 }
1744 else {
1745 ec->tag->state = TAG_RAISE;
1746 }
1747 return err;
1748}
1749
/* Begin a throw (break/retry/return/...): locate the control frame that
 * should catch it (`escape_cfp`) and package the payload into a
 * vm_throw_data.  Raises LocalJumpError when the target frame no longer
 * exists (orphan break / unexpected return). */
static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        /* Walk up to the block iseq this `break` belongs to. */
        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = CFP_ISEQ(escape_cfp);
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(CFP_ISEQ(escape_cfp) == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            /* Search live frames for the method that owns the block and
             * verify its catch table still covers this break. */
            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = CFP_ISEQ(escape_cfp);
                    const VALUE epc = CFP_PC(escape_cfp) - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        /* Walk frames from innermost to outermost looking for the frame
         * (method, lambda, or toplevel) this `return` should unwind to. */
        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        /* An eval counts as toplevel only if its lexical
                         * parent chain (through rescue/ensure/eval) ends
                         * at TOP or MAIN. */
                        const rb_iseq_t *is = CFP_ISEQ(escape_cfp);
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("isns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}
1936
1937static VALUE
1938vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1939 rb_num_t throw_state, VALUE throwobj)
1940{
1941 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1942 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1943
1944 if (state != 0) {
1945 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1946 }
1947 else {
1948 return vm_throw_continue(ec, throwobj);
1949 }
1950}
1951
/* Exported entry point for JIT/other callers: same as vm_throw. */
VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}
1957
/* expandarray instruction: push `num` elements of `ary` (plus an optional
 * splat array) onto the VM stack.  flag bit 0x01 = trailing splat, bit
 * 0x02 = "post" order (elements pushed back-to-front for post arguments).
 * Non-array values are treated as a one-element array unless they convert
 * via to_ary.  Missing positions are filled with nil. */
static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        /* not convertible: treat the original value as a 1-element array */
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            /* pad with nils for positions the array cannot fill */
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            /* remaining leading elements become the splat array */
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    /* keep a converted `ary` alive while we read from its buffer */
    RB_GC_GUARD(ary);
}
2028
2029static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2030
2031static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2032
/* Allocate a fresh call-cache entry set (ccs) for (klass, mid), mark the
 * method entry as cached, and register the ccs in `cc_tbl` under `mid`.
 * The ccs starts empty with room for two entries. */
static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    int initial_capa = 2;
    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
#if VM_CHECK_MODE > 0
    /* debug signature used by vm_ccs_p()-style sanity checks */
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = initial_capa;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);

    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    /* write-barrier: cc_tbl now references cme through the ccs */
    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
    return ccs;
}
2050
/* Append a (callinfo, callcache) pair to the ccs for `mid`, growing the
 * entry array if needed.  Unmarkable call caches are not stored. */
static void
vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        RUBY_ASSERT(ccs->capa > 0);
        ccs->capa *= 2;
        /* realloc may move the ccs, so the table entry must be refreshed
         * below to point at the (possibly new) address */
        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
#if VM_CHECK_MODE > 0
        ccs->debug_sig = ~(VALUE)ccs;
#endif
        // GC?
        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    /* write-barriered store of the cc into the table-owned ccs */
    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}
2080
2081#if VM_CHECK_MODE > 0
/* Debug helper (VM_CHECK_MODE only): print every (flag, argc, cc) entry of
 * a ccs to stderr. */
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i=0; i<ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}
2093
/* Consistency check (VM_CHECK_MODE only): every cc in the ccs must be a
 * callcache imemo for (klass, ccs->cme) and must not be a super/refinement
 * cache.  Always returns TRUE; violations trip VM_ASSERTs. */
static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i=0; i<ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
2111#endif
2112
2113const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2114
/* Remove the (invalidated) call-cache entry set for `mid` from klass's cc
 * table.  Must be called with the VM lock held.  Under multi-ractor
 * execution the table is copy-on-write: re-validate the table and entry
 * after acquiring the lock, then publish a copy with the entry deleted. */
static void
vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
{
    ASSERT_vm_locking();

    if (rb_multi_ractor_p()) {
        if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
            // Another ractor updated the CC table while we were waiting on the VM lock.
            // We have to retry.
            return;
        }

        VALUE ccs_obj = 0;
        rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
        struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;

        if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
            // Another ractor replaced that entry while we were waiting on the VM lock.
            return;
        }

        /* Copy-on-write: never mutate a table other ractors may be reading. */
        VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
        rb_vm_cc_table_delete(new_table, mid);
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
    }
    else {
        /* Single ractor: in-place delete is safe. */
        rb_vm_cc_table_delete(cc_tbl, mid);
    }
}
2144
/* Create and register a call cache for (klass, ci) after a cache miss.
 * Must be called with the VM lock held.  Returns the shared empty cc when
 * the method does not exist (undef/not found cannot be cached).  Under
 * multi-ractor execution the cc table is copied and republished atomically
 * rather than mutated in place. */
static const struct rb_callcache *
vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    ASSERT_vm_locking();

    RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);

    const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);

    VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));

    if (cme == NULL) {
        // undef or not found: can't cache the information
        VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
        return &vm_empty_cc;
    }

    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
    const VALUE original_cc_table = cc_tbl;
    if (!cc_tbl) {
        // Is this possible after rb_callable_method_entry ?
        cc_tbl = rb_vm_cc_table_create(1);
    }
    else if (rb_multi_ractor_p()) {
        /* copy-on-write: other ractors may be reading the current table */
        cc_tbl = rb_vm_cc_table_dup(cc_tbl);
    }

    VM_ASSERT(cme == rb_callable_method_entry(klass, mid));

    METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);

    VM_ASSERT(cc_tbl);

    /* Find or create the entry set for this method id. */
    struct rb_class_cc_entries *ccs = NULL;
    {
        VALUE ccs_obj;
        if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
        }
        else {
            // TODO: required?
            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
        }
    }

    /* May substitute a specialized cme for overloaded (e.g. builtin) methods. */
    cme = rb_check_overloaded_cme(cme, ci);

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
    vm_ccs_push(cc_tbl, mid, ccs, ci, cc);

    VM_ASSERT(vm_cc_cme(cc) != NULL);
    VM_ASSERT(cme->called_id == mid);
    VM_ASSERT(vm_cc_cme(cc)->called_id == mid);

    if (original_cc_table != cc_tbl) {
        /* Publish the new/copied table atomically. */
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
    }

    return cc;
}
2205
/* Lock-free lookup of an existing call cache for (klass, ci).  Returns
 * NULL on a miss.  If the cached method entry has been invalidated, the
 * entry set is evicted under the VM lock and the lookup restarts against
 * the (possibly republished) table. */
static const struct rb_callcache *
vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    VALUE cc_tbl;
    struct rb_class_cc_entries *ccs;
  retry:
    /* atomic load: the table pointer may be republished by other ractors */
    cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
    ccs = NULL;

    if (cc_tbl) {
        // CCS data is keyed on method id, so we don't need the method id
        // for doing comparisons in the `for` loop below.

        VALUE ccs_obj;
        if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
            const int ccs_len = ccs->len;

            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
                /* stale entry set: evict under the lock and start over */
                RB_VM_LOCKING() {
                    vm_evict_cc(klass, cc_tbl, mid);
                }
                goto retry;
            }
            else {
                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));

                // We already know the method id is correct because we had
                // to look up the ccs_data by method id. All we need to
                // compare is argc and flag
                unsigned int argc = vm_ci_argc(ci);
                unsigned int flag = vm_ci_flag(ci);

                for (int i=0; i<ccs_len; i++) {
                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;

                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));

                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);

                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
                        VM_ASSERT(ccs_cc->klass == klass);
                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));

                        return ccs_cc;
                    }
                }
            }
        }
    }

    /* keep the (possibly copied) table alive until we are done reading it */
    RB_GC_GUARD(cc_tbl);
    return NULL;
}
2263
/* Find or create the call cache for (klass, ci).  First tries a lock-free
 * lookup; on a miss, takes the VM lock and re-checks (double-checked
 * locking — another ractor may have populated the cache while we waited)
 * before creating a new entry. */
static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
{
    const ID mid = vm_ci_mid(ci);

    const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
    if (cc) {
        return cc;
    }

    RB_VM_LOCKING() {
        if (rb_multi_ractor_p()) {
            // The CC may have been populated by another ractor while we were waiting on the lock,
            // so we must lookup a second time.
            cc = vm_lookup_cc(klass, ci, mid);
        }

        if (!cc) {
            cc = vm_populate_cc(klass, ci, mid);
        }
    }

    return cc;
}
2288
/* Global-cache method search used when the inline cache misses: resolve
 * the call cache for (ci, klass) and sanity-check the invariants (never
 * NULL; either the shared empty cc or a valid, non-invalidated cc whose
 * cme matches the call's method id). */
const struct rb_callcache *
rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
{
    const struct rb_callcache *cc;

    VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);

    cc = vm_search_cc(klass, ci);

    VM_ASSERT(cc);
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
    VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));

    return cc;
}
2307
// Re-resolve the call cache for `cd` against `klass` and, when the
// inline method cache is enabled, store the result back into cd->cc.
// `cd_owner` is the object owning `cd` (the iseq); 0 when there is
// no owner to write-barrier.
static const struct rb_callcache *
vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
#if USE_DEBUG_COUNTER
    const struct rb_callcache *old_cc = cd->cc;
#endif

    const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);

#if OPT_INLINE_METHOD_CACHE
    cd->cc = cc;

    const struct rb_callcache *empty_cc = &vm_empty_cc;
    if (cd_owner && cc != empty_cc) {
        // GC write barrier: cd_owner now references cc.
        RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
    }

#if USE_DEBUG_COUNTER
    // Classify the kind of inline-cache miss for debug statistics.
    if (!old_cc || old_cc == empty_cc) {
        // empty
        RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
    }
    else if (old_cc == cc) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
    }
    else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
    }
    else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
             vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
    }
#endif
#endif // OPT_INLINE_METHOD_CACHE

    VM_ASSERT(vm_cc_cme(cc) == NULL ||
              vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));

    return cc;
}
2351
ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass));
// Inline-cache hit check: return cd->cc directly when it is still valid
// for `klass`; otherwise fall back to the slow path (which refills it).
static const struct rb_callcache *
vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass)
{
    const struct rb_callcache *cc = cd->cc;

#if OPT_INLINE_METHOD_CACHE
    // A cc is usable only if it was cached for the same class and its
    // method entry has not been invalidated by a redefinition.
    if (LIKELY(vm_cc_class_check(cc, klass))) {
        if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
            VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
            RB_DEBUG_COUNTER_INC(mc_inline_hit);
            VM_ASSERT(vm_cc_cme(cc) == NULL ||                        // not found
                      (vm_ci_flag(cd->ci) & VM_CALL_SUPER) ||         // search_super w/ define_method
                      vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid

            return cc;
        }
        RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
    }
#endif

    // Miss: re-search and refill the inline cache owned by this iseq.
    return vm_search_method_slowpath0((VALUE)CFP_ISEQ(reg_cfp), cd, klass);
}
2378
2379static const struct rb_callable_method_entry_struct *
2380vm_search_method(struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
2381{
2382 VALUE klass = CLASS_OF(recv);
2383 VM_ASSERT(klass != Qfalse);
2384 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2385
2386 const struct rb_callcache *cc = vm_search_method_fastpath(reg_cfp, cd, klass);
2387 return vm_cc_cme(cc);
2388}
2389
2391rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2392{
2393 // Called from ZJIT with the compile-time iseq, which may differ from
2394 // the iseq on the current CFP. Use the slowpath to avoid stale caches.
2395 VALUE klass = CLASS_OF(recv);
2396 const struct rb_callcache *cc = vm_search_method_slowpath0(cd_owner, cd, klass);
2397 return vm_cc_cme(cc);
2398}
2399
2400#if __has_attribute(transparent_union)
2401typedef union {
2402 VALUE (*anyargs)(ANYARGS);
2403 VALUE (*f00)(VALUE);
2404 VALUE (*f01)(VALUE, VALUE);
2405 VALUE (*f02)(VALUE, VALUE, VALUE);
2406 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2407 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2408 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2409 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2410 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2419 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2420} __attribute__((__transparent_union__)) cfunc_type;
2421# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2422#else
2423typedef VALUE (*cfunc_type)(ANYARGS);
2424# define make_cfunc_type(f) (cfunc_type)(f)
2425#endif
2426
2427static inline int
2428check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2429{
2430 if (! me) {
2431 return false;
2432 }
2433 else {
2434 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2435 VM_ASSERT(callable_method_entry_p(me));
2436 VM_ASSERT(me->def);
2437 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2438 return false;
2439 }
2440 else {
2441#if __has_attribute(transparent_union)
2442 return me->def->body.cfunc.func == func.anyargs;
2443#else
2444 return me->def->body.cfunc.func == func;
2445#endif
2446 }
2447 }
2448}
2449
2450static inline int
2451check_method_basic_definition(const rb_callable_method_entry_t *me)
2452{
2453 return me && METHOD_ENTRY_BASIC(me);
2454}
2455
2456static inline int
2457vm_method_cfunc_is(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv, cfunc_type func)
2458{
2459 VM_ASSERT(reg_cfp != NULL);
2460 const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);
2461 return check_cfunc(cme, func);
2462}
2463
// ZJIT entry point: does `me` implement exactly the C function `func`?
// Thin wrapper so the JIT can reuse check_cfunc() without the macro layer.
bool
rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
{
    return check_cfunc(me, func);
}
2469
2470int
2471rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2472{
2473 // Called from ZJIT with the compile-time iseq, which may differ from
2474 // the iseq on the current CFP. Use the slowpath to avoid stale caches.
2475 VALUE klass = CLASS_OF(recv);
2476 const struct rb_callcache *cc = vm_search_method_slowpath0((VALUE)iseq, cd, klass);
2477 const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc);
2478 return check_cfunc(cme, func);
2479}
2480
/* From here on, call sites of check_cfunc()/vm_method_cfunc_is() go
 * through make_cfunc_type() so plain function pointers are accepted
 * regardless of the transparent_union configuration above. */
#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
#define vm_method_cfunc_is(reg_cfp, cd, recv, func) vm_method_cfunc_is(reg_cfp, cd, recv, make_cfunc_type(func))

/* True when basic operation == (BOP_EQ) has not been redefined for type `t`. */
#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2485
2486static inline bool
2487FIXNUM_2_P(VALUE a, VALUE b)
2488{
2489 /* FIXNUM_P(a) && FIXNUM_P(b)
2490 * == ((a & 1) && (b & 1))
2491 * == a & b & 1 */
2492 SIGNED_VALUE x = a;
2493 SIGNED_VALUE y = b;
2494 SIGNED_VALUE z = x & y & 1;
2495 return z == 1;
2496}
2497
2498static inline bool
2499FLONUM_2_P(VALUE a, VALUE b)
2500{
2501#if USE_FLONUM
2502 /* FLONUM_P(a) && FLONUM_P(b)
2503 * == ((a & 3) == 2) && ((b & 3) == 2)
2504 * == ! ((a ^ 2) | (b ^ 2) & 3)
2505 */
2506 SIGNED_VALUE x = a;
2507 SIGNED_VALUE y = b;
2508 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2509 return !z;
2510#else
2511 return false;
2512#endif
2513}
2514
2515static VALUE
2516opt_equality_specialized(VALUE recv, VALUE obj)
2517{
2518 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2519 goto compare_by_identity;
2520 }
2521 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2522 goto compare_by_identity;
2523 }
2524 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2525 goto compare_by_identity;
2526 }
2527 else if (SPECIAL_CONST_P(recv)) {
2528 //
2529 }
2530 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2531 double a = RFLOAT_VALUE(recv);
2532 double b = RFLOAT_VALUE(obj);
2533
2534 return RBOOL(a == b);
2535 }
2536 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2537 if (recv == obj) {
2538 return Qtrue;
2539 }
2540 else if (RB_TYPE_P(obj, T_STRING)) {
2541 return rb_str_eql_internal(obj, recv);
2542 }
2543 }
2544 return Qundef;
2545
2546 compare_by_identity:
2547 return RBOOL(recv == obj);
2548}
2549
2550static VALUE
2551opt_equality(struct rb_control_frame_struct *reg_cfp, VALUE recv, VALUE obj, CALL_DATA cd)
2552{
2553 VM_ASSERT(reg_cfp != NULL);
2554
2555 VALUE val = opt_equality_specialized(recv, obj);
2556 if (!UNDEF_P(val)) return val;
2557
2558 if (!vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_equal)) {
2559 return Qundef;
2560 }
2561 else {
2562 return RBOOL(recv == obj);
2563 }
2564}
2565
2566#undef EQ_UNREDEFINED_P
2567
2568static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2569NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2570
// Slow path for rb_equal_opt/rb_eql_opt: search `mid` via the global cc
// cache using a stack-allocated callinfo, and answer by identity only if
// the resolved method is still rb_obj_equal. Returns Qundef otherwise.
static VALUE
opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
{
    // VM_CI_ON_STACK builds a temporary callinfo valid only for this call.
    const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));

    if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
        return RBOOL(recv == obj);
    }
    else {
        return Qundef;
    }
}
2583
2584static VALUE
2585opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2586{
2587 VALUE val = opt_equality_specialized(recv, obj);
2588 if (!UNDEF_P(val)) {
2589 return val;
2590 }
2591 else {
2592 return opt_equality_by_mid_slowpath(recv, obj, mid);
2593 }
2594}
2595
// Public helper: optimized obj1 == obj2. Returns Qundef when the result
// cannot be computed without a full method call.
VALUE
rb_equal_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEq);
}
2601
// Public helper: optimized obj1.eql?(obj2). Returns Qundef when the
// result cannot be computed without a full method call.
VALUE
rb_eql_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEqlP);
}
2607
2608extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2609extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2610
// Implements the checkmatch instruction: evaluate `pattern` against
// `target` per `type` (when clause, rescue clause, or case dispatch).
// WHEN returns the pattern itself (truthiness decided by the caller);
// RESCUE validates the pattern is a class/module, then both RESCUE and
// CASE dispatch pattern === target honoring refinements.
static VALUE
check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through */
      case VM_CHECKMATCH_TYPE_CASE: {
        return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
2629
2630
2631static inline VALUE
2632double_cmp_lt(double a, double b)
2633{
2634 return RBOOL(a < b);
2635}
2636
2637static inline VALUE
2638double_cmp_le(double a, double b)
2639{
2640 return RBOOL(a <= b);
2641}
2642
2643static inline VALUE
2644double_cmp_gt(double a, double b)
2645{
2646 return RBOOL(a > b);
2647}
2648
2649static inline VALUE
2650double_cmp_ge(double a, double b)
2651{
2652 return RBOOL(a >= b);
2653}
2654
// Copied by vm_dump.c
// Compute the base pointer (start of the parameter/local area) of `cfp`.
// Returns NULL for non-Ruby frames (e.g. cfunc frames).
static inline VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
    const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (CFP_ISEQ(cfp) && VM_FRAME_RUBYFRAME_P(cfp)) {
        // Locals sit just above the previous frame's sp, followed by the
        // VM_ENV bookkeeping slots.
        VALUE *bp = prev_cfp->sp + ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size + VM_ENV_DATA_SIZE;

        // Forwardable (`...`) methods also store the caller's CI and a
        // variable number of forwarded arguments on the stack.
        if (ISEQ_BODY(CFP_ISEQ(cfp))->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
            int lts = ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size;
            int params = ISEQ_BODY(CFP_ISEQ(cfp))->param.size;

            CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
            bp += vm_ci_argc(ci);
        }

        if (ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
            /* adjust `self' */
            bp += 1;
        }
#if VM_DEBUG_BP_CHECK
        // Cross-check against the bp recorded at frame-push time.
        if (bp != cfp->bp_check) {
            ruby_debug_printf("bp_check: %ld, bp: %ld\n",
                              (long)(cfp->bp_check - GET_EC()->vm_stack),
                              (long)(bp - GET_EC()->vm_stack));
            rb_bug("vm_base_ptr: unreachable");
        }
#endif
        return bp;
    }
    else {
        return NULL;
    }
}
2690
// Exported wrapper around vm_base_ptr() for other translation units.
VALUE *
rb_vm_base_ptr(const rb_control_frame_t *cfp)
{
    return vm_base_ptr(cfp);
}
2696
2697/* method call processes with call_info */
2698
2699#include "vm_args.c"
2700
2701static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2702ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2703static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2704static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2705static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2706static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2707static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2708
2709static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2710
// Fastpath handler: tailcall into an iseq that needs no optional-argument
// dispatch (resume pc is always 0).
static VALUE
vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
}
2718
2719static VALUE
2720vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2721{
2722 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2723
2724 const struct rb_callcache *cc = calling->cc;
2725 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2726 int param = ISEQ_BODY(iseq)->param.size;
2727 int local = ISEQ_BODY(iseq)->local_table_size;
2728 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2729}
2730
2731bool
2732rb_simple_iseq_p(const rb_iseq_t *iseq)
2733{
2734 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2735 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2736 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2737 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2738 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2739 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2740 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2741 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2742 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2743}
2744
2745bool
2746rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2747{
2748 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2749 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2750 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2751 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2752 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2753 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2754 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2757}
2758
2759bool
2760rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2761{
2762 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2763 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2764 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2765 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2766 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2767 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2768 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2769 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2770}
2771
/* Special max_args sentinels for the splat helpers below: allow spilling
 * huge splats to a heap array (and optionally keep the kw-splat flag). */
#define ALLOW_HEAP_ARGV (-2)
#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2774
// Expand the splatted array `ary` onto the VM stack (or into a hidden
// heap array when allowed and very large). Returns true when arguments
// were truncated to `max_args`, which tells the caller to re-check for
// a trailing keyword hash.
static inline bool
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
{
    vm_check_canary(GET_EC(), cfp->sp);
    bool ret = false;

    if (!NIL_P(ary)) {
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
        long len = RARRAY_LEN(ary);
        int argc = calling->argc;

        if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
            /* Avoid SystemStackError when splatting large arrays by storing arguments in
             * a temporary array, instead of trying to keeping arguments on the VM stack.
             */
            VALUE *argv = cfp->sp - argc;
            VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
            rb_ary_cat(argv_ary, argv, argc);
            rb_ary_cat(argv_ary, ptr, len);
            // Collapse all stacked args into the single hidden array.
            cfp->sp -= argc - 1;
            cfp->sp[-1] = argv_ary;
            calling->argc = 1;
            calling->heap_argv = argv_ary;
            RB_GC_GUARD(ary);
        }
        else {
            long i;

            if (max_args >= 0 && len + argc > max_args) {
                /* If only a given max_args is allowed, copy up to max args.
                 * Used by vm_callee_setup_block_arg for non-lambda blocks,
                 * where additional arguments are ignored.
                 *
                 * Also, copy up to one more argument than the maximum,
                 * in case it is an empty keyword hash that will be removed.
                 */
                calling->argc += len - (max_args - argc + 1);
                len = max_args - argc + 1;
                ret = true;
            }
            else {
                /* Unset heap_argv if set originally. Can happen when
                 * forwarding modified arguments, where heap_argv was used
                 * originally, but heap_argv not supported by the forwarded
                 * method in all cases.
                 */
                calling->heap_argv = 0;
            }
            CHECK_VM_STACK_OVERFLOW(cfp, len);

            // Push the (possibly truncated) splat contents onto the stack.
            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            calling->argc += i;
        }
    }

    return ret;
}
2834
// Collapse the kw_len keyword values on top of the stack into a single
// keyword hash, leaving that hash as the final argument and setting
// kw_splat (converts VM_CALL_KWARG style into VM_CALL_KW_SPLAT style).
static inline void
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
{
    const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
    const int kw_len = vm_ci_kwarg(ci)->keyword_len;
    const VALUE h = rb_hash_new_with_size(kw_len);
    VALUE *sp = cfp->sp;
    int i;

    // Pair each keyword symbol with its stacked value.
    for (i=0; i<kw_len; i++) {
        rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
    }
    (sp-kw_len)[0] = h;

    // kw_len values were replaced by one hash argument.
    cfp->sp -= kw_len - 1;
    calling->argc -= kw_len - 1;
    calling->kw_splat = 1;
}
2853
// Normalize a **splat operand: convert non-hash values via to_hash, and
// dup non-mutable, non-empty hashes so the callee can safely mutate.
// Qnil passes through unchanged.
static inline VALUE
vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
{
    if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
        if (keyword_hash != Qnil) {
            /* Convert a non-hash keyword splat to a new hash */
            keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
        }
    }
    else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
        /* Convert a hash keyword splat to a new hash unless
         * a mutable keyword splat was passed.
         * Skip allocating new hash for empty keyword splat, as empty
         * keyword splat will be ignored by both callers.
         */
        keyword_hash = rb_hash_dup(keyword_hash);
    }
    return keyword_hash;
}
2873
// Normalize caller-side arguments on the VM stack before callee setup:
// expands *splat, normalizes **kw-splat, and converts literal keyword
// arguments into a keyword hash. `max_args` bounds how many positional
// arguments the callee accepts (or is an ALLOW_HEAP_ARGV sentinel).
static inline void
CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                 struct rb_calling_info *restrict calling,
                 const struct rb_callinfo *restrict ci, int max_args)
{
    if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
        if (IS_ARGS_KW_SPLAT(ci)) {
            // f(*a, **kw)
            VM_ASSERT(calling->kw_splat == 1);

            // Pop the splat array and keyword hash operands.
            cfp->sp -= 2;
            calling->argc -= 2;
            VALUE ary = cfp->sp[0];
            VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);

            // splat a
            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;

            // put kw
            if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
                if (UNLIKELY(calling->heap_argv)) {
                    rb_ary_push(calling->heap_argv, kwh);
                    // Tag the hash so later consumers know it was **kw.
                    ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
                    if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
                        calling->kw_splat = 0;
                    }
                }
                else {
                    cfp->sp[0] = kwh;
                    cfp->sp++;
                    calling->argc++;

                    VM_ASSERT(calling->kw_splat == 1);
                }
            }
            else {
                // Empty/nil keyword splat is dropped entirely.
                calling->kw_splat = 0;
            }
        }
        else {
            // f(*a)
            VM_ASSERT(calling->kw_splat == 0);

            cfp->sp -= 1;
            calling->argc -= 1;
            VALUE ary = cfp->sp[0];

            // If the splat was truncated, the last copied element may be a
            // flagged keyword hash; jump into the stack-based check below.
            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
                goto check_keyword;
            }

            // check the last argument
            VALUE last_hash, argv_ary;
            if (UNLIKELY(argv_ary = calling->heap_argv)) {
                if (!IS_ARGS_KEYWORD(ci) &&
                    RARRAY_LEN(argv_ary) > 0 &&
                    RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    rb_ary_pop(argv_ary);
                    if (!RHASH_EMPTY_P(last_hash)) {
                        rb_ary_push(argv_ary, rb_hash_dup(last_hash));
                        calling->kw_splat = 1;
                    }
                }
            }
            else {
check_keyword:
                if (!IS_ARGS_KEYWORD(ci) &&
                    calling->argc > 0 &&
                    RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    if (RHASH_EMPTY_P(last_hash)) {
                        calling->argc--;
                        cfp->sp -= 1;
                    }
                    else {
                        cfp->sp[-1] = rb_hash_dup(last_hash);
                        calling->kw_splat = 1;
                    }
                }
            }
        }
    }
    else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
        // f(**kw)
        VM_ASSERT(calling->kw_splat == 1);
        VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);

        if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
            cfp->sp--;
            calling->argc--;
            calling->kw_splat = 0;
        }
        else {
            cfp->sp[-1] = kwh;
        }
    }
    else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
        // f(k1:1, k2:2)
        VM_ASSERT(calling->kw_splat == 0);

        /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
         * by creating a keyword hash.
         * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
         */
        vm_caller_setup_arg_kw(cfp, calling, ci);
    }
}
2984
/* Debug-only histogram of how many optional arguments call sites supply.
 * opt_hist has OPT_HIST_MAX+1 buckets; the last one collects overflow
 * (opt >= OPT_HIST_MAX). */
#define USE_OPT_HIST 0

#if USE_OPT_HIST
#define OPT_HIST_MAX 64
static int opt_hist[OPT_HIST_MAX+1];

__attribute__((destructor))
static void
opt_hist_show_results_at_exit(void)
{
    /* Print all buckets including the overflow bucket at OPT_HIST_MAX,
     * which the previous `i < OPT_HIST_MAX` bound silently skipped. */
    for (int i=0; i<=OPT_HIST_MAX; i++) {
        ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
    }
}
#endif
3000
3001static VALUE
3002vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3003 struct rb_calling_info *calling)
3004{
3005 const struct rb_callcache *cc = calling->cc;
3006 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3007 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3008 const int opt = calling->argc - lead_num;
3009 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3010 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3011 const int param = ISEQ_BODY(iseq)->param.size;
3012 const int local = ISEQ_BODY(iseq)->local_table_size;
3013 const int delta = opt_num - opt;
3014
3015 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3016
3017#if USE_OPT_HIST
3018 if (opt_pc < OPT_HIST_MAX) {
3019 opt_hist[opt]++;
3020 }
3021 else {
3022 opt_hist[OPT_HIST_MAX]++;
3023 }
3024#endif
3025
3026 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
3027}
3028
3029static VALUE
3030vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3031 struct rb_calling_info *calling)
3032{
3033 const struct rb_callcache *cc = calling->cc;
3034 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3035 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3036 const int opt = calling->argc - lead_num;
3037 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3038
3039 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3040
3041#if USE_OPT_HIST
3042 if (opt_pc < OPT_HIST_MAX) {
3043 opt_hist[opt]++;
3044 }
3045 else {
3046 opt_hist[OPT_HIST_MAX]++;
3047 }
3048#endif
3049
3050 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3051}
3052
3053static void
3054args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3055 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3056 VALUE *const locals);
3057
// Call into an iseq declared with `...` (forwardable). The caller's CI
// is pushed as an extra stack slot and param/local sizes are widened by
// the forwarded argument count so the callee frame can replay the call.
static VALUE
vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                         struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param_size = ISEQ_BODY(iseq)->param.size;
    int local_size = ISEQ_BODY(iseq)->local_table_size;

    // Setting up local size and param size
    VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);

    local_size = local_size + vm_ci_argc(calling->cd->ci);
    param_size = param_size + vm_ci_argc(calling->cd->ci);

    // Store the caller's CI as the last "local" on the stack.
    cfp->sp[0] = (VALUE)calling->cd->ci;

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
}
3077
// Fastpath: caller passes literal keyword arguments (VM_CALL_KWARG) to a
// keyword-only-parameter iseq. Copies the stacked keyword values aside,
// then writes them into the callee's keyword locals (klocals).
static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
    const int ci_kw_len = kw_arg->keyword_len;
    const VALUE * const ci_keywords = kw_arg->keywords;
    VALUE *argv = cfp->sp - calling->argc;
    // Keyword locals start right after the lead args and before the kw bits.
    VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    // Copy the keyword values off the stack before they are overwritten.
    VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
    MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
    args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3104
// Fastpath: caller passes no keyword arguments to a keyword-parameter
// iseq, so every keyword local is filled from its default value.
static VALUE
vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                  struct rb_calling_info *calling)
{
    const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    VALUE * const argv = cfp->sp - calling->argc;
    VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;

    int i;
    for (i=0; i<kw_param->num; i++) {
        klocals[i] = kw_param->default_values[i];
    }
    klocals[i] = INT2FIX(0); // kw specify flag
    // NOTE:
    //   nobody check this value, but it should be cleared because it can
    //   points invalid VALUE (T_NONE objects, raw pointer and so on).

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3133
3134static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3135
3136static VALUE
3137vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3138 struct rb_calling_info *calling)
3139{
3140 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3141 cfp->sp -= (calling->argc + 1);
3142 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3143 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3144}
3145
3146VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3147
// Emit the "block passed but unused" warning at most once per call site.
// In relaxed mode dedup is per method id; in strict mode (the
// strict_unused_block warning category) dedup is per (pc, method def)
// pair so every distinct call site warns once.
static void
warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
{
    rb_vm_t *vm = GET_VM();
    set_table *dup_check_table = &vm->unused_block_warning_table;
    st_data_t key;
    bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);

    // Byte views of the pc and method-def pointers used to mix the key.
    union {
        VALUE v;
        unsigned char b[SIZEOF_VALUE];
    } k1 = {
        .v = (VALUE)pc,
    }, k2 = {
        .v = (VALUE)cme->def,
    };

    // relax check
    if (!strict_unused_block) {
        key = (st_data_t)cme->def->original_id;

        if (set_table_lookup(dup_check_table, key)) {
            return;
        }
    }

    // strict check
    // make unique key from pc and me->def pointer
    key = 0;
    for (int i=0; i<SIZEOF_VALUE; i++) {
        // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
        key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
    }

    // Debug dump of the key construction (disabled).
    if (0) {
        fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
        fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
        fprintf(stderr, "key:%p\n", (void *)key);
    }

    // duplication check
    if (set_insert(dup_check_table, key)) {
        // already shown
    }
    else if (RTEST(ruby_verbose) || strict_unused_block) {
        VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
        VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);

        if (!NIL_P(m_loc)) {
            rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
                    name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
        }
        else {
            rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
        }
    }
}
3205
3206static inline int
3207vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3208 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3209{
3210 const struct rb_callinfo *ci = calling->cd->ci;
3211 const struct rb_callcache *cc = calling->cc;
3212
3213 VM_ASSERT((vm_ci_argc(ci), 1));
3214 VM_ASSERT(vm_cc_cme(cc) != NULL);
3215
3216 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3217 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3218 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3219 warn_unused_block(vm_cc_cme(cc), iseq, (void *)CFP_PC(ec->cfp));
3220 }
3221
3222 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3223 if (LIKELY(rb_simple_iseq_p(iseq))) {
3224 rb_control_frame_t *cfp = ec->cfp;
3225 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3226 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3227
3228 if (calling->argc != lead_num) {
3229 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3230 }
3231
3232 //VM_ASSERT(ci == calling->cd->ci);
3233 VM_ASSERT(cc == calling->cc);
3234
3235 if (vm_call_iseq_optimizable_p(ci, cc)) {
3236 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) && ruby_vm_c_events_enabled == 0) {
3237 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3238 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3239 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3240 }
3241 else {
3242 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3243 }
3244 }
3245 return 0;
3246 }
3247 else if (rb_iseq_only_optparam_p(iseq)) {
3248 rb_control_frame_t *cfp = ec->cfp;
3249
3250 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3251 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3252
3253 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3254 const int argc = calling->argc;
3255 const int opt = argc - lead_num;
3256
3257 if (opt < 0 || opt > opt_num) {
3258 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3259 }
3260
3261 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3262 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3263 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3264 vm_call_cacheable(ci, cc));
3265 }
3266 else {
3267 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3268 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3269 vm_call_cacheable(ci, cc));
3270 }
3271
3272 /* initialize opt vars for self-references */
3273 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3274 for (int i=argc; i<lead_num + opt_num; i++) {
3275 argv[i] = Qnil;
3276 }
3277 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3278 }
3279 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3280 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3281 const int argc = calling->argc;
3282 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3283
3284 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3285 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3286
3287 if (argc - kw_arg->keyword_len == lead_num) {
3288 const int ci_kw_len = kw_arg->keyword_len;
3289 const VALUE * const ci_keywords = kw_arg->keywords;
3290 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3291 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3292
3293 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3294 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3295
3296 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3297 vm_call_cacheable(ci, cc));
3298
3299 return 0;
3300 }
3301 }
3302 else if (argc == lead_num) {
3303 /* no kwarg */
3304 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3305 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3306
3307 if (klocals[kw_param->num] == INT2FIX(0)) {
3308 /* copy from default_values */
3309 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3310 vm_call_cacheable(ci, cc));
3311 }
3312
3313 return 0;
3314 }
3315 }
3316 }
3317
3318 // Called iseq is using ... param
3319 // def foo(...) # <- iseq for foo will have "forwardable"
3320 //
3321 // We want to set the `...` local to the caller's CI
3322 // foo(1, 2) # <- the ci for this should end up as `...`
3323 //
3324 // So hopefully the stack looks like:
3325 //
3326 // => 1
3327 // => 2
3328 // => *
3329 // => **
3330 // => &
3331 // => ... # <- points at `foo`s CI
3332 // => cref_or_me
3333 // => specval
3334 // => type
3335 //
3336 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3337 bool can_fastpath = true;
3338
3339 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3340 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3341 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3342 ci = vm_ci_new_runtime(
3343 vm_ci_mid(ci),
3344 vm_ci_flag(ci),
3345 vm_ci_argc(ci),
3346 vm_ci_kwarg(ci));
3347 }
3348 else {
3349 ci = forward_cd->caller_ci;
3350 }
3351 can_fastpath = false;
3352 }
3353 // C functions calling iseqs will stack allocate a CI,
3354 // so we need to convert it to heap allocated
3355 if (!vm_ci_markable(ci)) {
3356 ci = vm_ci_new_runtime(
3357 vm_ci_mid(ci),
3358 vm_ci_flag(ci),
3359 vm_ci_argc(ci),
3360 vm_ci_kwarg(ci));
3361 can_fastpath = false;
3362 }
3363 argv[param_size - 1] = (VALUE)ci;
3364 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3365 return 0;
3366 }
3367
3368 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3369}
3370
// Copy forwarded (`...`) arguments from the caller's locals onto the current
// stack, overwriting the CI (and optional splat array) currently on top, so a
// plain positional call can proceed.
static void
vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
{
    // This case is when the caller is using a ... parameter.
    // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
    // In this case the caller's caller's CI will be on the stack.
    //
    // For example:
    //
    // def bar(a, b); a + b; end
    // def foo(...); bar(...); end
    // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
    //
    // Stack layout will be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > CI for foo(1, 2), via `getlocal ...`
    // > ( SP points here )
    const VALUE * lep = VM_CF_LEP(cfp);

    const rb_iseq_t *iseq;

    // If we're in an escaped environment (lambda for example), get the iseq
    // from the captured env.
    if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
        rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
        iseq = env->iseq;
    }
    else { // Otherwise use the lep to find the caller
        iseq = CFP_ISEQ(rb_vm_search_cf_from_ep(ec, cfp, lep));
    }

    // Our local storage is below the args we need to copy
    int local_size = ISEQ_BODY(iseq)->local_table_size + argc;

    const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
    VALUE * to = cfp->sp - 1; // clobber the CI

    if (RTEST(splat)) {
        to -= 1; // clobber the splat array
        CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
        MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
        to += RARRAY_LEN(splat);
    }

    CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
    MEMCPY(to, from, VALUE, argc);
    cfp->sp = to + argc;

    // Stack layout should now be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > 1
    // > 2
    // > ( SP points here )
}
3439
3440static VALUE
3441vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3442{
3443 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3444
3445 const struct rb_callcache *cc = calling->cc;
3446 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3447 int param_size = ISEQ_BODY(iseq)->param.size;
3448 int local_size = ISEQ_BODY(iseq)->local_table_size;
3449
3450 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3451
3452 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3453 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3454}
3455
3456static VALUE
3457vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3458{
3459 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3460
3461 const struct rb_callcache *cc = calling->cc;
3462 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3463 int param_size = ISEQ_BODY(iseq)->param.size;
3464 int local_size = ISEQ_BODY(iseq)->local_table_size;
3465
3466 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3467
3468 // Setting up local size and param size
3469 local_size = local_size + vm_ci_argc(calling->cd->ci);
3470 param_size = param_size + vm_ci_argc(calling->cd->ci);
3471
3472 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3473 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3474}
3475
3476static inline VALUE
3477vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3478 int opt_pc, int param_size, int local_size)
3479{
3480 const struct rb_callinfo *ci = calling->cd->ci;
3481 const struct rb_callcache *cc = calling->cc;
3482
3483 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3484 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3485 }
3486 else {
3487 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3488 }
3489}
3490
// Push a method frame for an iseq-defined method. `argv` points at the first
// argument on the VM stack; the receiver sits one slot below it and is
// consumed by the new frame.
static inline VALUE
vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
                          int opt_pc, int param_size, int local_size)
{
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *argv = cfp->sp - calling->argc;
    VALUE *sp = argv + param_size;
    cfp->sp = argv - 1 /* recv */;

    // opt_pc offsets the starting PC past optional-argument default code
    // that has already been accounted for during argument setup.
    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
                  calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  local_size - param_size,
                  ISEQ_BODY(iseq)->stack_max);
    return Qundef;
}
3507
// Tailcall frame setup: pop the caller's frame, then re-push self and the
// arguments in its place so the callee reuses the caller's stack region.
static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
{
    const struct rb_callcache *cc = calling->cc;
    unsigned int i;
    VALUE *argv = cfp->sp - calling->argc;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    // Preserve the FINISH flag of the frame being replaced.
    VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    // If the block handler points into the frame we are about to pop, migrate
    // the captured block to the previous control frame first.
    if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
        struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
        const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
        dst_captured->code.val = src_captured->code.val;
        if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
            calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
        }
        else {
            calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
        }
    }

    vm_pop_frame(ec, cfp, cfp->ep);
    cfp = ec->cfp;

    sp_orig = sp = cfp->sp;

    /* push self */
    sp[0] = calling->recv;
    sp++;

    /* copy arguments */
    for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
        *sp++ = src_argv[i];
    }

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
                  calling->recv, calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
                  ISEQ_BODY(iseq)->stack_max);

    cfp->sp = sp_orig;

    return Qundef;
}
3556
3557static void
3558ractor_unsafe_check(void)
3559{
3560 if (!rb_ractor_main_p()) {
3561 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
3562 }
3563}
3564
3565static VALUE
3566call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3567{
3568 ractor_unsafe_check();
3569 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3570 return (*f)(recv, rb_ary_new4(argc, argv));
3571}
3572
3573static VALUE
3574call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3575{
3576 ractor_unsafe_check();
3577 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3578 return (*f)(argc, argv, recv);
3579}
3580
3581static VALUE
3582call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3583{
3584 ractor_unsafe_check();
3585 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3586 return (*f)(recv);
3587}
3588
3589static VALUE
3590call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3591{
3592 ractor_unsafe_check();
3593 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3594 return (*f)(recv, argv[0]);
3595}
3596
3597static VALUE
3598call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3599{
3600 ractor_unsafe_check();
3601 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3602 return (*f)(recv, argv[0], argv[1]);
3603}
3604
3605static VALUE
3606call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3607{
3608 ractor_unsafe_check();
3609 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3610 return (*f)(recv, argv[0], argv[1], argv[2]);
3611}
3612
3613static VALUE
3614call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3615{
3616 ractor_unsafe_check();
3617 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3618 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3619}
3620
3621static VALUE
3622call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3623{
3624 ractor_unsafe_check();
3625 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3626 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3627}
3628
3629static VALUE
3630call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3631{
3632 ractor_unsafe_check();
3634 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3635}
3636
3637static VALUE
3638call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3639{
3640 ractor_unsafe_check();
3642 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3643}
3644
3645static VALUE
3646call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3647{
3648 ractor_unsafe_check();
3650 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3651}
3652
3653static VALUE
3654call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3655{
3656 ractor_unsafe_check();
3658 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3659}
3660
3661static VALUE
3662call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3663{
3664 ractor_unsafe_check();
3666 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3667}
3668
3669static VALUE
3670call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3671{
3672 ractor_unsafe_check();
3674 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3675}
3676
3677static VALUE
3678call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3679{
3680 ractor_unsafe_check();
3682 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3683}
3684
3685static VALUE
3686call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3687{
3688 ractor_unsafe_check();
3690 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3691}
3692
3693static VALUE
3694call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3695{
3696 ractor_unsafe_check();
3698 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3699}
3700
3701static VALUE
3702call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3703{
3704 ractor_unsafe_check();
3706 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3707}
3708
3709static VALUE
3710ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3711{
3712 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3713 return (*f)(recv, rb_ary_new4(argc, argv));
3714}
3715
3716static VALUE
3717ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3718{
3719 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3720 return (*f)(argc, argv, recv);
3721}
3722
3723static VALUE
3724ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3725{
3726 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3727 return (*f)(recv);
3728}
3729
3730static VALUE
3731ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3732{
3733 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3734 return (*f)(recv, argv[0]);
3735}
3736
3737static VALUE
3738ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3739{
3740 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3741 return (*f)(recv, argv[0], argv[1]);
3742}
3743
3744static VALUE
3745ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3746{
3747 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3748 return (*f)(recv, argv[0], argv[1], argv[2]);
3749}
3750
3751static VALUE
3752ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3753{
3754 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3755 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3756}
3757
3758static VALUE
3759ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3760{
3761 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3762 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3763}
3764
3765static VALUE
3766ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3767{
3769 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3770}
3771
3772static VALUE
3773ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3774{
3776 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3777}
3778
3779static VALUE
3780ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3781{
3783 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3784}
3785
3786static VALUE
3787ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3788{
3790 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3791}
3792
3793static VALUE
3794ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3795{
3797 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3798}
3799
3800static VALUE
3801ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3802{
3804 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3805}
3806
3807static VALUE
3808ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3809{
3811 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3812}
3813
3814static VALUE
3815ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3816{
3818 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3819}
3820
3821static VALUE
3822ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3823{
3825 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3826}
3827
3828static VALUE
3829ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3830{
3832 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3833}
3834
3835static inline int
3836vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3837{
3838 const int ov_flags = RAISED_STACKOVERFLOW;
3839 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3840 if (rb_ec_raised_p(ec, ov_flags)) {
3841 rb_ec_raised_reset(ec, ov_flags);
3842 return TRUE;
3843 }
3844 return FALSE;
3845}
3846
/* Abort via rb_bug when the callee left the control-frame stack inconsistent
 * (see vm_cfp_consistent_p); expects `ec` and `reg_cfp` in scope. */
#define CHECK_CFP_CONSISTENCY(func) \
    (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
     rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3850
// Return the cfunc body of `me`. In debug builds (VM_DEBUG_VERIFY_METHOD_CACHE)
// verify first that the entry is really a CFUNC/NOTIMPLEMENTED method.
static inline
const rb_method_cfunc_t *
vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
{
#if VM_DEBUG_VERIFY_METHOD_CACHE
    switch (me->def->type) {
      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
        break;
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
        METHOD_BUG(ISEQ);
        METHOD_BUG(ATTRSET);
        METHOD_BUG(IVAR);
        METHOD_BUG(BMETHOD);
        METHOD_BUG(ZSUPER);
        METHOD_BUG(UNDEF);
        METHOD_BUG(OPTIMIZED);
        METHOD_BUG(MISSING);
        METHOD_BUG(REFINED);
        METHOD_BUG(ALIAS);
# undef METHOD_BUG
      default:
        rb_bug("wrong method type: %d", me->def->type);
    }
#endif
    return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
}
3878
// Core cfunc invocation: push a CFUNC frame, fire C_CALL hooks, invoke the
// C function through its arity-specific invoker, then pop the frame and fire
// C_RETURN hooks. `argv` points at `argc` arguments; `stack_bottom` is where
// SP is restored to (the receiver slot) before/after the call.
static VALUE
vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                          int argc, VALUE *argv, VALUE *stack_bottom)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    VALUE val;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);

    VALUE recv = calling->recv;
    VALUE block_handler = calling->block_handler;
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;

    // Record that the last hash argument is a keyword splat on the frame.
    if (UNLIKELY(calling->kw_splat)) {
        frame_type |= VM_FRAME_FLAG_CFRAME_KW;
    }

    VM_ASSERT(reg_cfp == ec->cfp);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);

    vm_push_frame(ec, NULL, frame_type, recv,
                  block_handler, (VALUE)me,
                  0, ec->cfp->sp, 0, 0);

    // Fixed-arity cfuncs get an explicit arity check before invocation.
    int len = cfunc->argc;
    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp = stack_bottom;
    val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);

    CHECK_CFP_CONSISTENCY("vm_call_cfunc");

    rb_vm_pop_frame(ec);

    VM_ASSERT(ec->cfp->sp == stack_bottom);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);

    return val;
}
3924
// Push a C method frame for a given cme. This is called when JIT code skipped
// pushing a frame but the C method reached a point where a frame is needed.
// `recv_idx` is the receiver's distance below the current SP (0 means sp[-1]).
void
rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
{
    VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
    rb_execution_context_t *ec = GET_EC();
    VALUE *sp = ec->cfp->sp;
    VALUE recv = *(sp - recv_idx - 1);
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
    VALUE block_handler = VM_BLOCK_HANDLER_NONE;
#if VM_CHECK_MODE > 0
    // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
    *(GET_EC()->cfp->sp) = Qfalse;
#endif
    vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
}
3942
3943// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3944bool
3945rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3946{
3947 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3948}
3949
3950static VALUE
3951vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3952{
3953 int argc = calling->argc;
3954 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3955 VALUE *argv = &stack_bottom[1];
3956
3957 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3958}
3959
3960static VALUE
3961vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3962{
3963 const struct rb_callinfo *ci = calling->cd->ci;
3964 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3965
3966 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3967 VALUE argv_ary;
3968 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3969 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3970 int argc = RARRAY_LENINT(argv_ary);
3971 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3972 VALUE *stack_bottom = reg_cfp->sp - 2;
3973
3974 VM_ASSERT(calling->argc == 1);
3975 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3976 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3977
3978 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3979 }
3980 else {
3981 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3982
3983 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3984 }
3985}
3986
// Expand the splat array found at sp[-1 - stack_offset] onto the VM stack
// (overwriting the array and any kw slot) and invoke the cfunc with a frame.
// `argc_offset` trims a trailing (empty keyword) element from the array.
static inline VALUE
vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
{
    VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
    int argc = RARRAY_LENINT(argv_ary) - argc_offset;

    // Too many arguments to expand onto the stack; use the generic path.
    if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
        return vm_call_cfunc_other(ec, reg_cfp, calling);
    }

    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    calling->kw_splat = 0;
    int i;
    // stack_bottom lands on the receiver slot; arguments are copied above it.
    VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
    VALUE *sp = stack_bottom;
    CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
    for(i = 0; i < argc; i++) {
        *++sp = argv[i];
    }
    reg_cfp->sp = sp+1;

    return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
}
4010
4011static inline VALUE
4012vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4013{
4014 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
4015 VALUE argv_ary = reg_cfp->sp[-1];
4016 int argc = RARRAY_LENINT(argv_ary);
4017 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4018 VALUE last_hash;
4019 int argc_offset = 0;
4020
4021 if (UNLIKELY(argc > 0 &&
4022 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
4023 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
4024 if (!RHASH_EMPTY_P(last_hash)) {
4025 return vm_call_cfunc_other(ec, reg_cfp, calling);
4026 }
4027 argc_offset++;
4028 }
4029 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
4030}
4031
4032static inline VALUE
4033vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4034{
4035 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4036 VALUE keyword_hash = reg_cfp->sp[-1];
4037
4038 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4039 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4040 }
4041
4042 return vm_call_cfunc_other(ec, reg_cfp, calling);
4043}
4044
4045static VALUE
4046vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4047{
4048 const struct rb_callinfo *ci = calling->cd->ci;
4049 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4050
4051 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4052 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4053 // f(*a)
4054 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4055 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4056 }
4057 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4058 // f(*a, **kw)
4059 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4060 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4061 }
4062 }
4063
4064 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4065 return vm_call_cfunc_other(ec, reg_cfp, calling);
4066}
4067
4068static VALUE
4069vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4070{
4071 const struct rb_callcache *cc = calling->cc;
4072 RB_DEBUG_COUNTER_INC(ccf_ivar);
4073 cfp->sp -= 1;
4074 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4075 return ivar;
4076}
4077
// attr_writer fast path: pop value and receiver, then write the ivar using
// the shape/index cached in `cc`, falling back per receiver type and finally
// to the generic slow path when the inline write misses.
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    attr_index_t index;
    shape_id_t dest_shape_id;
    vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
    ID id = vm_cc_cme(cc)->def->body.attr.id;
    rb_check_frozen(obj);
    VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
    if (UNDEF_P(res)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
            // Plain object miss: fall through to the slow path below.
            break;
          case T_CLASS:
          case T_MODULE:
            {
                res = vm_setivar_class(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
            break;
          default:
            {
                res = vm_setivar_default(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
        }
        // All typed fast paths missed: take the generic slow path.
        res = vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    return res;
}
4115
4116static VALUE
4117vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4118{
4119 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4120}
4121
// Shared bmethod invocation: verify the defining Proc's Ractor shareability,
// then invoke it with the already-flattened argument vector.
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
    rb_proc_t *proc;
    VALUE val;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    // A non-shareable Proc may only be invoked from the Ractor that defined it.
    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    /* control block frame */
    GetProcPtr(procv, proc);
    val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));

    return val;
}
4142
4143static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4144
// Invoke a bmethod (a method defined from a Proc) whose underlying block is
// iseq-backed, by pushing a block-style frame flagged BMETHOD|LAMBDA directly.
static VALUE
vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);

    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    // A non-shareable Proc may only be invoked from the Ractor that defined it.
    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    const struct rb_block *block = &proc->block;

    // Unwrap nested proc blocks until the underlying iseq block is reached.
    while (vm_block_type(block) == block_type_proc) {
        block = vm_proc_block(block->as.proc);
    }
    VM_ASSERT(vm_block_type(block) == block_type_iseq);

    const struct rb_captured_block *captured = &block->as.captured;
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    VALUE * const argv = cfp->sp - calling->argc;
    const int arg_size = ISEQ_BODY(iseq)->param.size;

    // Simple argument lists take the block-arg setup; anything else goes
    // through the full complex parameter setup.
    int opt_pc;
    if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
        opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
    }
    else {
        opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
    }

    cfp->sp = argv - 1; // -1 for the receiver

    vm_push_frame(ec, iseq,
                  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
                  calling->recv,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)cme,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  argv + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size,
                  ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
4195
/* Invoke a bmethod whose underlying block is NOT an ISeq block (e.g. a
 * symbol-proc or an ifunc): collect the arguments into a C array and hand
 * them to vm_call_bmethod_body. */
static VALUE
vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);

    VALUE *argv;
    int argc;
    CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(calling->heap_argv)) {
        /* Arguments were spilled to a heap array; pop the receiver and the
         * array from the VM stack. */
        argv = RARRAY_PTR(calling->heap_argv);
        cfp->sp -= 2;
    }
    else {
        /* Copy the on-stack arguments aside, then pop args + receiver. */
        argc = calling->argc;
        argv = ALLOCA_N(VALUE, argc);
        MEMCPY(argv, cfp->sp - argc, VALUE, argc);
        cfp->sp += - argc - 1;
    }

    return vm_call_bmethod_body(ec, calling, argv);
}
4217
4218static VALUE
4219vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4220{
4221 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4222
4223 const struct rb_callcache *cc = calling->cc;
4224 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4225 VALUE procv = cme->def->body.bmethod.proc;
4226 rb_proc_t *proc;
4227 GetProcPtr(procv, proc);
4228 const struct rb_block *block = &proc->block;
4229
4230 while (vm_block_type(block) == block_type_proc) {
4231 block = vm_proc_block(block->as.proc);
4232 }
4233 if (vm_block_type(block) == block_type_iseq) {
4234 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4235 return vm_call_iseq_bmethod(ec, cfp, calling);
4236 }
4237
4238 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4239 return vm_call_noniseq_bmethod(ec, cfp, calling);
4240}
4241
4242VALUE
4243rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4244{
4245 VALUE klass = current_class;
4246
4247 /* for prepended Module, then start from cover class */
4248 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4249 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4250 klass = RBASIC_CLASS(klass);
4251 }
4252
4253 while (RTEST(klass)) {
4254 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4255 if (owner == target_owner) {
4256 return klass;
4257 }
4258 klass = RCLASS_SUPER(klass);
4259 }
4260
4261 return current_class; /* maybe module function */
4262}
4263
4264static const rb_callable_method_entry_t *
4265aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4266{
4267 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4268 const rb_callable_method_entry_t *cme;
4269
4270 if (orig_me->defined_class == 0) {
4271 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4272 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4273 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4274
4275 if (me->def->reference_count == 1) {
4276 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4277 }
4278 else {
4280 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4281 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4282 }
4283 }
4284 else {
4285 cme = (const rb_callable_method_entry_t *)orig_me;
4286 }
4287
4288 VM_ASSERT(callable_method_entry_p(cme));
4289 return cme;
4290}
4291
4293rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4294{
4295 return aliased_callable_method_entry(me);
4296}
4297
/* Call an aliased method: swap in an on-stack call cache holding the
 * resolved original entry, then re-dispatch on its method type. */
static VALUE
vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    calling->cc = &VM_CC_ON_STACK(Qundef,
                                  vm_call_general,
                                  {{0}},
                                  aliased_callable_method_entry(vm_cc_cme(calling->cc)));

    return vm_call_method_each_type(ec, cfp, calling);
}
4308
4309static enum method_missing_reason
4310ci_missing_reason(const struct rb_callinfo *ci)
4311{
4312 enum method_missing_reason stat = MISSING_NOENTRY;
4313 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4314 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4315 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4316 return stat;
4317}
4318
4319static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4320
/* Core of send/__send__ and Symbol#to_proc style dispatch: look up the method
 * named by `symbol` on the receiver and call it.  When the symbol names no
 * interned ID, falls back to method_missing; for a basic (undefined)
 * method_missing, raises NoMethodError directly without interning the symbol
 * (see [Feature #5112]). */
static VALUE
vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
               struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
{
    ASSUME(calling->argc >= 0);

    enum method_missing_reason missing_reason = MISSING_NOENTRY;
    int argc = calling->argc;
    VALUE recv = calling->recv;
    VALUE klass = CLASS_OF(recv);
    ID mid = rb_check_id(&symbol); /* 0 when the name is not an interned ID */
    flags |= VM_CALL_OPT_SEND;

    if (UNLIKELY(! mid)) {
        /* Unknown name: redirect to method_missing with the symbol prepended. */
        mid = idMethodMissing;
        missing_reason = ci_missing_reason(ci);
        ec->method_missing_reason = missing_reason;

        VALUE argv_ary;
        if (UNLIKELY(argv_ary = calling->heap_argv)) {
            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                rb_ary_unshift(argv_ary, symbol);

                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);

                rb_exc_raise(exc);
            }
            rb_ary_unshift(argv_ary, rb_str_intern(symbol));
        }
        else {
            /* Open a slot on the VM stack and shift args up by one so the
             * method name becomes the first argument.
             *
             * E.g. when argc == 2
             *
             *   |      |     |      | TOPN
             *   |      |     +------+
             *   |      |  +--> | arg1 | 0
             *   +------+  |  +------+
             *   | arg1 | -+ +-> | arg0 | 1
             *   +------+  |  +------+
             *   | arg0 | ---+  | sym  | 2
             *   +------+     +------+
             *   | recv |     | recv | 3
             * --+------+--------+------+------
             */
            int i = argc;
            CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
            INC_SP(1);
            MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
            argc = ++calling->argc;

            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                TOPN(i) = symbol;
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, argc, argv, priv);

                rb_exc_raise(exc);
            }
            else {
                TOPN(i) = rb_str_intern(symbol);
            }
        }
    }

    /* Build a fresh call data (on stack) for the resolved mid/flags/argc,
     * preserving the caller_ci when forwarding (`...`) is in effect. */
    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }
    calling->cc = &VM_CC_ON_STACK(klass,
                                  vm_call_general,
                                  { .method_missing_reason = missing_reason },
                                  rb_callable_method_entry_with_refinements(klass, mid, NULL));

    if (flags & VM_CALL_FCALL) {
        /* Function-call form skips the visibility check below. */
        return vm_call_method(ec, reg_cfp, calling);
    }

    const struct rb_callcache *cc = calling->cc;
    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        /* Enforce visibility: public goes straight through; private and
         * protected fall back to method_missing with the matching reason. */
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, reg_cfp, calling);
          case METHOD_VISI_PRIVATE:
            vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
            break;
          case METHOD_VISI_PROTECTED:
            vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
            break;
          default:
            VM_UNREACHABLE(vm_call_method);
        }
        return vm_call_method_missing(ec, reg_cfp, calling);
    }

    return vm_call_method_nome(ec, reg_cfp, calling);
}
4436
/* Implementation of send with on-stack arguments: pop the method-name symbol
 * off the VM stack, shift the remaining arguments down by one, and dispatch
 * via vm_call_symbol. */
static VALUE
vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    int i;
    VALUE sym;

    i = calling->argc - 1;

    if (calling->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    /* The symbol sits below the arguments (deepest slot). */
    sym = TOPN(i);
    /* E.g. when i == 2
     *
     *   |      |     |      | TOPN
     *   +------+     |      |
     *   | arg1 | ---+  |      | 0
     *   +------+  |  +------+
     *   | arg0 | -+ +-> | arg1 | 1
     *   +------+  |  +------+
     *   | sym  |  +---> | arg0 | 2
     *   +------+     +------+
     *   | recv |     | recv | 3
     * --+------+--------+------+------
     */
    /* shift arguments */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    calling->argc -= 1;
    DEC_SP(1);

    return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
}
4473
/* send with a non-simple argument shape (splat / kw-splat / kwargs).  When
 * the arguments were spilled to a heap array, the symbol is shifted out of
 * that array; otherwise falls back to the on-stack path. */
static VALUE
vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = VM_CALL_FCALL;
    VALUE sym;

    VALUE argv_ary;
    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        sym = rb_ary_shift(argv_ary);
        flags |= VM_CALL_ARGS_SPLAT;
        if (calling->kw_splat) {
            /* Mark the trailing hash so the callee treats it as keywords,
             * then clear kw_splat since it is now encoded in the hash flag. */
            VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
            ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
            calling->kw_splat = 0;
        }
        return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
    }

    if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
    return vm_call_opt_send0(ec, reg_cfp, calling, flags);
}
4498
4499static VALUE
4500vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4501{
4502 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4503 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4504}
4505
/* Entry point for optimized send: pick the complex path when forwarding is
 * in effect or the argument shape needs heap handling (lone splat/kw-splat,
 * splat+kw-splat pair, or all-kwargs), else the simple on-stack path, and
 * install the chosen handler as the fastpath. */
static VALUE
vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send);

    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = vm_ci_flag(ci);

    if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
        CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
        return vm_call_opt_send_complex(ec, reg_cfp, calling);
    }

    CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
    return vm_call_opt_send_simple(ec, reg_cfp, calling);
}
4525
/* Redirect a failed call to method_missing: open one stack slot, shift the
 * arguments up, and prepend the original method name as a Symbol, then
 * dispatch to method_missing (ignoring refinements). */
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                            const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
{
    RB_DEBUG_COUNTER_INC(ccf_method_missing);

    VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
    unsigned int argc, flag;

    flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
    argc = ++calling->argc;

    /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    vm_check_canary(ec, reg_cfp->sp);
    if (argc > 1) {
        MEMMOVE(argv+1, argv, VALUE, argc-1);
    }
    argv[0] = ID2SYM(vm_ci_mid(orig_ci));
    INC_SP(1);

    ec->method_missing_reason = reason;

    /* Fresh on-stack call data aimed at idMethodMissing; keep the caller_ci
     * when argument forwarding (`...`) is active. */
    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(flag & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
    return vm_call_method(ec, reg_cfp, calling);
}
4571
4572static VALUE
4573vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4574{
4575 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4576}
4577
4578static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
/* Resolve a zsuper (argument-forwarding super) call: look the method up in
 * the superclass of `klass`, unwrap a refined entry back to its original,
 * and re-dispatch with an on-stack call cache. */
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
{
    klass = RCLASS_SUPER(klass);

    const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
    if (cme == NULL) {
        return vm_call_method_nome(ec, cfp, calling);
    }
    if (cme->def->type == VM_METHOD_TYPE_REFINED &&
        cme->def->body.refined.orig_me) {
        /* super should see the refined method's original, not the refinement. */
        cme = refined_method_callable_without_refinement(cme);
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

    return vm_call_method_each_type(ec, cfp, calling);
}
4597
4598static inline VALUE
4599find_refinement(VALUE refinements, VALUE klass)
4600{
4601 if (NIL_P(refinements)) {
4602 return Qnil;
4603 }
4604 return rb_hash_lookup(refinements, klass);
4605}
4606
PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
/* Find the control frame of the method that lexically encloses `cfp`: for a
 * block frame, walk outward to the frame running its local (method) iseq.
 * Returns the original frame for an orphan block whose method frame is gone. */
static rb_control_frame_t *
current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (CFP_ISEQ(cfp) && ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_BLOCK) {
        const rb_iseq_t *local_iseq = ISEQ_BODY(CFP_ISEQ(cfp))->local_iseq;

        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
                /* TODO: orphan block */
                return top_cfp;
            }
        } while (CFP_ISEQ(cfp) != local_iseq);
    }
    return cfp;
}
4626
4627static const rb_callable_method_entry_t *
4628refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4629{
4630 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4631 const rb_callable_method_entry_t *cme;
4632
4633 if (orig_me->defined_class == 0) {
4634 cme = NULL;
4636 }
4637 else {
4638 cme = (const rb_callable_method_entry_t *)orig_me;
4639 }
4640
4641 VM_ASSERT(callable_method_entry_p(cme));
4642
4643 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4644 cme = NULL;
4645 }
4646
4647 return cme;
4648}
4649
/* Search the lexical cref chain for a refinement of the cached method's
 * owner that (re)defines the called method.  Handles super-in-refinement by
 * skipping the refinement currently executing; falls back to the original
 * method (or the superclass lookup) when no usable refinement is found. */
static const rb_callable_method_entry_t *
search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    ID mid = vm_ci_mid(calling->cd->ci);
    const rb_cref_t *cref = vm_get_cref(cfp->ep);
    const struct rb_callcache * const cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

    for (; cref; cref = CREF_NEXT(cref)) {
        const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
        if (NIL_P(refinement)) continue;

        const rb_callable_method_entry_t *const ref_me =
            rb_callable_method_entry(refinement, mid);

        if (ref_me) {
            if (vm_cc_call(cc) == vm_call_super_method) {
                /* super in a refined method: skip the refinement we are
                 * currently executing, or super would recurse into itself. */
                const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                    continue;
                }
            }

            if (cme->def->type != VM_METHOD_TYPE_REFINED ||
                cme->def != ref_me->def) {
                cme = ref_me;
            }
            if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
                return cme;
            }
        }
        else {
            return NULL;
        }
    }

    if (vm_cc_cme(cc)->def->body.refined.orig_me) {
        /* No active refinement: use the original (pre-refinement) method. */
        return refined_method_callable_without_refinement(vm_cc_cme(cc));
    }
    else {
        /* No original either: continue lookup from the superclass. */
        VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
        const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
        return cme;
    }
}
4696
/* Call a refined method: resolve the effective entry via the cref chain and
 * re-dispatch.  When the call site has a writable call cache, cache the
 * resolved entry in a new heap callcache (with write barrier); otherwise use
 * an on-stack cache. */
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);

    if (ref_cme) {
        if (calling->cd->cc) {
            const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
            RB_OBJ_WRITE(CFP_ISEQ(cfp), &calling->cd->cc, cc);
            return vm_call_method(ec, cfp, calling);
        }
        else {
            struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
            calling->cc= ref_cc;
            return vm_call_method(ec, cfp, calling);
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
4718
4719static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4720
4721NOINLINE(static VALUE
4722 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4723 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4724
/* Helper for Proc#call / yield via #call: drop the receiver slot from the VM
 * stack (shifting the arguments down) and invoke the block handler. */
static VALUE
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                         struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
{
    int argc = calling->argc;

    /* remove self */
    if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
    DEC_SP(1);

    return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
}
4737
4738static VALUE
4739vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4740{
4741 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4742
4743 const struct rb_callinfo *ci = calling->cd->ci;
4744 VALUE procval = calling->recv;
4745 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4746}
4747
/* Optimized call of the method's own block parameter: invoke the frame's
 * block handler directly while BOP_CALL on Proc is unredefined; otherwise
 * materialize the Proc and go through the generic slow path. */
static VALUE
vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_block_call);

    VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
    const struct rb_callinfo *ci = calling->cd->ci;

    if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
        return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
    }
    else {
        calling->recv = rb_vm_bh_to_procval(ec, block_handler);
        calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
        return vm_call_general(ec, reg_cfp, calling);
    }
}
4765
4766static VALUE
4767vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4768{
4769 VALUE recv = calling->recv;
4770
4771 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4772 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4773 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4774
4775 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4776 return RSTRUCT_GET_RAW(recv, off);
4777}
4778
4779static VALUE
4780vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4781{
4782 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4783
4784 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4785 reg_cfp->sp -= 1;
4786 return ret;
4787}
4788
4789static VALUE
4790vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4791{
4792 VALUE recv = calling->recv;
4793
4794 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4795 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4796 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4797
4798 rb_check_frozen(recv);
4799
4800 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4801 RSTRUCT_SET_RAW(recv, off, val);
4802
4803 return val;
4804}
4805
4806static VALUE
4807vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4808{
4809 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4810
4811 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4812 reg_cfp->sp -= 2;
4813 return ret;
4814}
4815
NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                                        const struct rb_callinfo *ci, const struct rb_callcache *cc));

/* Evaluate `func` into `var`, wrapped in c-call/c-return event hooks when any
 * C-level events are enabled; otherwise run `nohook` (typically a fastpath
 * installation) first and call `func` without hook overhead. */
#define VM_CALL_METHOD_ATTR(var, func, nohook) \
    if (UNLIKELY(ruby_vm_c_events_enabled > 0)) { \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
        var = func; \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
    } \
    else { \
        nohook; \
        var = func; \
    }
4831
/* Dispatch a VM_METHOD_TYPE_OPTIMIZED method by its optimized subtype:
 * send, Proc#call, block #call, and Struct reader/writer accessors. */
static VALUE
vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                  const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    switch (vm_cc_cme(cc)->def->body.optimized.type) {
      case OPTIMIZED_METHOD_TYPE_SEND:
        CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
        return vm_call_opt_send(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
        return vm_call_opt_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
        return vm_call_opt_block_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);

        VALUE v;
        /* Only install the fastpath for simple arg shapes (nohook branch). */
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aref(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
        CALLER_SETUP_ARG(cfp, calling, ci, 1);
        rb_check_arity(calling->argc, 1, 1);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aset(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      default:
        rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
    }
}
4872
4873static VALUE
4874vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4875{
4876 const struct rb_callinfo *ci = calling->cd->ci;
4877 const struct rb_callcache *cc = calling->cc;
4878 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4879 VALUE v;
4880
4881 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4882
4883 switch (cme->def->type) {
4884 case VM_METHOD_TYPE_ISEQ:
4885 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4886 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4887 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4888 }
4889 else {
4890 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4891 return vm_call_iseq_setup(ec, cfp, calling);
4892 }
4893
4894 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4895 case VM_METHOD_TYPE_CFUNC:
4896 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4897 return vm_call_cfunc(ec, cfp, calling);
4898
4899 case VM_METHOD_TYPE_ATTRSET:
4900 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4901
4902 rb_check_arity(calling->argc, 1, 1);
4903
4904 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4905
4906 if (vm_cc_markable(cc)) {
4907 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4908 VM_CALL_METHOD_ATTR(v,
4909 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4910 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4911 }
4912 else {
4913 cc = &((struct rb_callcache) {
4914 .flags = T_IMEMO |
4915 (imemo_callcache << FL_USHIFT) |
4916 VM_CALLCACHE_UNMARKABLE |
4917 VM_CALLCACHE_ON_STACK,
4918 .klass = cc->klass,
4919 .cme_ = cc->cme_,
4920 .call_ = cc->call_,
4921 .aux_ = {
4922 .attr = {
4923 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4924 }
4925 },
4926 });
4927
4928 VM_CALL_METHOD_ATTR(v,
4929 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4930 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4931 }
4932 return v;
4933
4934 case VM_METHOD_TYPE_IVAR:
4935 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4936 rb_check_arity(calling->argc, 0, 0);
4937 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4938 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4939 VM_CALL_METHOD_ATTR(v,
4940 vm_call_ivar(ec, cfp, calling),
4941 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4942 return v;
4943
4944 case VM_METHOD_TYPE_MISSING:
4945 vm_cc_method_missing_reason_set(cc, 0);
4946 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4947 return vm_call_method_missing(ec, cfp, calling);
4948
4949 case VM_METHOD_TYPE_BMETHOD:
4950 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4951 return vm_call_bmethod(ec, cfp, calling);
4952
4953 case VM_METHOD_TYPE_ALIAS:
4954 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4955 return vm_call_alias(ec, cfp, calling);
4956
4957 case VM_METHOD_TYPE_OPTIMIZED:
4958 return vm_call_optimized(ec, cfp, calling, ci, cc);
4959
4960 case VM_METHOD_TYPE_UNDEF:
4961 break;
4962
4963 case VM_METHOD_TYPE_ZSUPER:
4964 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4965
4966 case VM_METHOD_TYPE_REFINED:
4967 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4968 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4969 return vm_call_refined(ec, cfp, calling);
4970 }
4971
4972 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4973}
4974
4975NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4976
/* Handle a call for which no method entry exists.  If the missing method IS
 * method_missing itself, raise NoMethodError directly (vm_raise_method_missing
 * is NORETURN); otherwise redirect through method_missing. */
static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    /* method missing */
    const struct rb_callinfo *ci = calling->cd->ci;
    const int stat = ci_missing_reason(ci);

    if (vm_ci_mid(ci) == idMethodMissing) {
        if (UNLIKELY(calling->heap_argv)) {
            vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
        }
        else {
            /* STACK_ADDR_FROM_TOP expects a local named reg_cfp. */
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
            vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
        }
    }
    else {
        return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
    }
}
4998
4999/* Protected method calls and super invocations need to check that the receiver
5000 * (self for super) inherits the module on which the method is defined.
5001 * In the case of refinements, it should consider the original class not the
5002 * refinement.
5003 */
5004static VALUE
5005vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
5006{
5007 VALUE defined_class = me->defined_class;
5008 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5009 return NIL_P(refined_class) ? defined_class : refined_class;
5010}
5011
/* Generic method-call dispatcher: enforce visibility rules before delegating
 * to the per-method-type dispatch, falling back to method_missing for
 * disallowed private/protected calls and to the no-method path when the
 * cached method entry is absent. */
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PRIVATE:
            /* Private methods require receiver-less (FCALL) form. */
            if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
                enum method_missing_reason stat = MISSING_PRIVATE;
                if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;

                vm_cc_method_missing_reason_set(cc, stat);
                CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
                return vm_call_method_missing(ec, cfp, calling);
            }
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PROTECTED:
            /* Protected methods require self to be kind_of? defining class. */
            if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
                VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
                    return vm_call_method_missing(ec, cfp, calling);
                }
                else {
                    /* caching method info to dummy cc */
                    VM_ASSERT(vm_cc_cme(cc) != NULL);
                    struct rb_callcache cc_on_stack = *cc;
                    FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
                    calling->cc = &cc_on_stack;
                    return vm_call_method_each_type(ec, cfp, calling);
                }
            }
            return vm_call_method_each_type(ec, cfp, calling);

          default:
            rb_bug("unreachable");
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
5062
/* Default (slow-path) call handler installed in fresh call caches; counts
 * the call and runs the full dispatch. */
static VALUE
vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_general);
    return vm_call_method(ec, reg_cfp, calling);
}
5069
/* Reset a call cache's handler back to the generic slow path (e.g. after
 * invalidating a fastpath).  The const-cast is deliberate: call_ is declared
 * const but this is the sanctioned mutation point. */
void
rb_vm_cc_general(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());

    *(vm_call_handler *)&cc->call_ = vm_call_general;
}
5078
/* Call handler used by invokesuper; must remain a distinct function from
 * vm_call_general (see comment below) because its address is compared in
 * search_refined_method. */
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_super_method);

    // This line is introduced to make different from `vm_call_general` because some compilers (VC we found)
    // can merge the function and the address of the function becomes same.
    // The address of `vm_call_super_method` is used in `search_refined_method`, so it should be different.
    if (ec == NULL) rb_bug("unreachable");

    /* this check is required to distinguish with other functions. */
    VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
    return vm_call_method(ec, reg_cfp, calling);
}
5093
5094/* super */
5095
5096static inline VALUE
5097vm_search_normal_superclass(VALUE klass)
5098{
5099 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5100 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5101 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5102 klass = RBASIC(klass)->klass;
5103 }
5104 klass = RCLASS_ORIGIN(klass);
5105 return RCLASS_SUPER(klass);
5106}
5107
NORETURN(static void vm_super_outside(void));

/* Raise NoMethodError for `super` used where no method frame exists. */
static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
5115
/* Return the shared sentinel call cache used for `super` sites with no
 * resolvable callee (it is a static object, not GC-markable). */
static const struct rb_callcache *
empty_cc_for_super(void)
{
    return &vm_empty_cc_for_super;
}
5121
/* Resolve the target of a `super` call: look up the current method's original
 * id in the superclass of the frame's defined class, validating the receiver
 * and updating (or invalidating) the call cache `cd` as needed. */
static const struct rb_callcache *
vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
{
    VALUE current_defined_class;
    const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);

    // `super` outside of a method frame is an error.
    if (!me) {
        vm_super_outside();
    }

    current_defined_class = vm_defined_class_for_protected_call(me);

    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        iseq != method_entry_iseqptr(me) &&
        !rb_obj_is_kind_of(recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RCLASS_INCLUDER(current_defined_class) : current_defined_class;

        if (m) { /* not bound UnboundMethod */
            rb_raise(rb_eTypeError,
                     "self has wrong type to call super in this context: "
                     "%"PRIsVALUE" (expected %"PRIsVALUE")",
                     rb_obj_class(recv), m);
        }
    }

    // Zsuper (argument-less `super`) from a define_method body cannot
    // reconstruct the original arguments, so it is rejected.
    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }

    ID mid = me->def->original_id;

    // Rewrite the call info so the search uses the original method id
    // (relevant when the current method was defined under an alias).
    if (!vm_ci_markable(cd->ci)) {
        VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
    }
    else {
        // update iseq. really? (TODO)
        cd->ci = vm_ci_new_runtime(mid,
                                   vm_ci_flag(cd->ci),
                                   vm_ci_argc(cd->ci),
                                   vm_ci_kwarg(cd->ci));

        RB_OBJ_WRITTEN(iseq, Qundef, cd->ci);
    }

    const struct rb_callcache *cc;

    VALUE klass = vm_search_normal_superclass(me->defined_class);

    if (!klass) {
        /* bound instance method of module */
        cc = vm_cc_new(Qundef, NULL, vm_call_method_missing, cc_type_super);
        RB_OBJ_WRITE(iseq, &cd->cc, cc);
    }
    else {
        cc = vm_search_method_fastpath(reg_cfp, cd, klass);
        const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);

        // define_method can cache for different method id
        if (cached_cme == NULL) {
            // empty_cc_for_super is not markable object
            cd->cc = empty_cc_for_super();
        }
        else if (cached_cme->called_id != mid) {
            const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
            if (cme) {
                cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
                RB_OBJ_WRITE(iseq, &cd->cc, cc);
            }
            else {
                cd->cc = cc = empty_cc_for_super();
            }
        }
        else {
            switch (cached_cme->def->type) {
              // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
              case VM_METHOD_TYPE_REFINED:
              // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
              case VM_METHOD_TYPE_ATTRSET:
              case VM_METHOD_TYPE_IVAR:
                vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
                break;
              default:
                break; // use fastpath
            }
        }
    }

    VM_ASSERT((vm_cc_cme(cc), true));

    return cc;
}
5218
5219/* yield */
5220
5221static inline int
5222block_proc_is_lambda(const VALUE procval)
5223{
5224 rb_proc_t *proc;
5225
5226 if (procval) {
5227 GetProcPtr(procval, proc);
5228 return proc->is_lambda;
5229 }
5230 else {
5231 return 0;
5232 }
5233}
5234
5235static VALUE
5236vm_yield_with_cfunc(rb_execution_context_t *ec,
5237 const struct rb_captured_block *captured,
5238 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5240{
5241 int is_lambda = FALSE; /* TODO */
5242 VALUE val, arg, blockarg;
5243 int frame_flag;
5244 const struct vm_ifunc *ifunc = captured->code.ifunc;
5245
5246 if (is_lambda) {
5247 arg = rb_ary_new4(argc, argv);
5248 }
5249 else if (argc == 0) {
5250 arg = Qnil;
5251 }
5252 else {
5253 arg = argv[0];
5254 }
5255
5256 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5257
5258 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5259 if (kw_splat) {
5260 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5261 }
5262
5263 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5264 frame_flag,
5265 self,
5266 VM_GUARDED_PREV_EP(captured->ep),
5267 (VALUE)me,
5268 0, ec->cfp->sp, 0, 0);
5269 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5270 rb_vm_pop_frame(ec);
5271
5272 return val;
5273}
5274
/* Public entry: yield to an ifunc block with no keyword splat, no block
 * argument, and no bmethod method entry. */
VALUE
rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
{
    return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
}
5280
/* Yield to a Symbol block (&:sym): call the symbol's proc semantics with the
 * given arguments and the caller's block handler converted to a proc. */
static VALUE
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
{
    return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
}
5286
5287static inline int
5288vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5289{
5290 int i;
5291 long len = RARRAY_LEN(ary);
5292
5293 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5294
5295 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5296 argv[i] = RARRAY_AREF(ary, i);
5297 }
5298
5299 return i;
5300}
5301
/* Return argv[0] converted to an Array via to_ary (Qnil when it does not
 * convert).  The conversion must not replace argv[0] itself — the disabled
 * branch documents that; the live branch asserts it. */
static inline VALUE
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
{
    VALUE ary, arg0 = argv[0];
    ary = rb_check_array_type(arg0);
#if 0
    argv[0] = arg0;
#else
    VM_ASSERT(argv[0] == arg0);
#endif
    return ary;
}
5314
/* Arrange block/lambda arguments on the stack for `iseq` and return the
 * opt_pc offset (0 for simple iseqs).  Block (non-lambda) invocation
 * auto-splats a single array argument, pads missing parameters with nil, and
 * truncates extras; lambda/method setup raises on arity mismatch. */
static int
vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
    if (rb_simple_iseq_p(iseq)) {
        rb_control_frame_t *cfp = ec->cfp;
        VALUE arg0;

        CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);

        // Auto-splat: `{ |a, b| }` called with one array spreads its elements.
        if (arg_setup_type == arg_setup_block &&
            calling->argc == 1 &&
            ISEQ_BODY(iseq)->param.flags.has_lead &&
            !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
            !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
            calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
        }

        if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
            if (arg_setup_type == arg_setup_block) {
                if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
                    int i;
                    CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
                    for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
                }
                else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
                }
            }
            else {
                argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
            }
        }

        return 0;
    }
    else {
        return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
    }
}
5355
/* Build a temporary calling-info/call-info pair on the C stack and delegate
 * yield argument setup to vm_callee_setup_block_arg(). */
static int
vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
    struct rb_calling_info calling_entry, *calling;

    calling = &calling_entry;
    calling->argc = argc;
    calling->block_handler = block_handler;
    calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
    calling->recv = Qundef;
    calling->heap_argv = 0;
    calling->cc = NULL;
    struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);

    return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
}
5372
5373/* ruby iseq -> ruby block */
5374
/* Push a BLOCK (or LAMBDA) frame for an iseq-backed block.  Returns Qundef so
 * the interpreter continues execution inside the newly pushed frame. */
static VALUE
vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    const int arg_size = ISEQ_BODY(iseq)->param.size;
    // Arguments currently sit on top of the stack; rsp points at the first.
    VALUE * const rsp = GET_SP() - calling->argc;
    VALUE * const argv = rsp;
    int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
    int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);

    SET_SP(rsp);

    vm_push_frame(ec, iseq,
                  frame_flag,
                  captured->self,
                  VM_GUARDED_PREV_EP(captured->ep), 0,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  rsp + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
5400
/* Invoke a Symbol used as a block (&:sym).  The first argument becomes the
 * receiver of the symbol's method; complex argument forms (splat, kw splat,
 * kwargs) are normalized first, possibly spilling argv to the heap. */
static VALUE
vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                       struct rb_calling_info *calling, const struct rb_callinfo *ci,
                       MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
    int flags = vm_ci_flag(ci);

    if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 0) ||
                  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
        CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
        flags = 0;
        if (UNLIKELY(calling->heap_argv)) {
#if VM_ARGC_STACK_MAX < 0
            if (RARRAY_LEN(calling->heap_argv) < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
#endif
            // First heap-argv element is the receiver of the symbol call.
            calling->recv = rb_ary_shift(calling->heap_argv);
            // Modify stack to avoid cfp consistency error
            reg_cfp->sp++;
            reg_cfp->sp[-1] = reg_cfp->sp[-2];
            reg_cfp->sp[-2] = calling->recv;
            flags |= VM_CALL_ARGS_SPLAT;
        }
        else {
            if (calling->argc < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
            calling->recv = TOPN(--calling->argc);
        }
        if (calling->kw_splat) {
            flags |= VM_CALL_KW_SPLAT;
        }
    }
    else {
        // Simple case: pop the receiver straight off the stack.
        if (calling->argc < 1) {
            rb_raise(rb_eArgError, "no receiver given");
        }
        calling->recv = TOPN(--calling->argc);
    }

    return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
}
5448
/* Invoke a C-function (ifunc) block.  After CALLER_SETUP_ARG the arguments
 * live either on the VM stack or in a heap argv array. */
static VALUE
vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                      struct rb_calling_info *calling, const struct rb_callinfo *ci,
                      MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE val;
    int argc;
    const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
    CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    argc = calling->argc;
    val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
    POPN(argc); /* TODO: should put before C/yield? */
    return val;
}
5463
/* Convert a Proc object into the VM's tagged block handler representation
 * matching its underlying block type. */
static VALUE
vm_proc_to_block_handler(VALUE procval)
{
    const struct rb_block *block = vm_proc_block(procval);

    switch (vm_block_type(block)) {
      case block_type_iseq:
        return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
      case block_type_ifunc:
        return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
      case block_type_symbol:
        return VM_BH_FROM_SYMBOL(block->as.symbol);
      case block_type_proc:
        return VM_BH_FROM_PROC(block->as.proc);
    }
    VM_UNREACHABLE(vm_yield_with_proc);
    return Qundef;
}
5482
/* Unwrap proc-typed block handlers (a proc wrapping another proc) down to the
 * underlying iseq/ifunc/symbol handler, then invoke it.  Lambda-ness follows
 * the innermost proc. */
static VALUE
vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
        VALUE proc = VM_BH_TO_PROC(block_handler);
        is_lambda = block_proc_is_lambda(proc);
        block_handler = vm_proc_to_block_handler(proc);
    }

    return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
}
5496
5497static inline VALUE
5498vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5499 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5500 bool is_lambda, VALUE block_handler)
5501{
5502 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5503 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5504 bool is_lambda, VALUE block_handler);
5505
5506 switch (vm_block_handler_type(block_handler)) {
5507 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5508 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5509 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5510 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5511 default: rb_bug("vm_invoke_block: unreachable");
5512 }
5513
5514 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5515}
5516
/* Create a Proc from `blockiseq` captured against the nearest Ruby-level
 * frame of the current execution context. */
static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
    const rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
    struct rb_captured_block *captured;

    if (cfp == 0) {
        rb_bug("vm_make_proc_with_iseq: unreachable");
    }

    captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
    captured->code.iseq = blockiseq;

    return rb_vm_make_proc(ec, captured, rb_cProc);
}
5533
/* Execute a `once` iseq by wrapping it in a proc and calling it with no
 * arguments and no block. */
static VALUE
vm_once_exec(VALUE iseq)
{
    VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
    return rb_proc_call_with_block(proc, 0, 0, Qnil);
}
5540
/* Reset a `once` inline-storage entry's running-thread marker (used as an
 * ensure handler so a raised exception re-enables the once body). */
static VALUE
vm_once_clear(VALUE data)
{
    union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
    is->once.running_thread = NULL;
    return Qnil;
}
5548
5549/* defined insn */
5550
5551static bool
5552check_respond_to_missing(VALUE obj, VALUE v)
5553{
5554 VALUE args[2];
5555 VALUE r;
5556
5557 args[0] = obj; args[1] = Qfalse;
5558 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5559 if (!UNDEF_P(r) && RTEST(r)) {
5560 return true;
5561 }
5562 else {
5563 return false;
5564 }
5565}
5566
/* Implement the `defined?` keyword for the given expression kind `op_type`;
 * `obj` is the symbol/index being queried and `v` an auxiliary value (e.g. the
 * receiver or constant scope).  Returns true when the expression is defined. */
static bool
vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    VALUE klass;
    enum defined_type type = (enum defined_type)op_type;

    switch (type) {
      case DEFINED_IVAR:
        return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
        break;
      case DEFINED_GVAR:
        return rb_gvar_defined(SYM2ID(obj));
        break;
      case DEFINED_CVAR: {
        const rb_cref_t *cref = vm_get_cref(GET_EP());
        klass = vm_get_cvar_base(cref, GET_CFP(), 0);
        return rb_cvar_defined(klass, SYM2ID(obj));
        break;
      }
      case DEFINED_CONST:
      case DEFINED_CONST_FROM: {
        bool allow_nil = type == DEFINED_CONST;
        klass = v;
        return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
        break;
      }
      case DEFINED_FUNC:
        klass = CLASS_OF(v);
        return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
        break;
      case DEFINED_METHOD:{
        VALUE klass = CLASS_OF(v);
        const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);

        if (me) {
            switch (METHOD_ENTRY_VISI(me)) {
              case METHOD_VISI_PRIVATE:
                break;
              case METHOD_VISI_PROTECTED:
                if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
                    break;
                }
                /* fall through: protected and self is a kind of the class */
              case METHOD_VISI_PUBLIC:
                return true;
                break;
              default:
                rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
            }
        }
        else {
            // No method entry: defer to respond_to_missing?.
            return check_respond_to_missing(obj, v);
        }
        break;
      }
      case DEFINED_YIELD:
        if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
            return true;
        }
        break;
      case DEFINED_ZSUPER:
        {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());

            if (me) {
                VALUE klass = vm_search_normal_superclass(me->defined_class);
                if (!klass) return false;

                ID id = me->def->original_id;

                return rb_method_boundp(klass, id, 0);
            }
        }
        break;
      case DEFINED_REF:
        return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
      default:
        rb_bug("unimplemented defined? type (VM)");
        break;
    }

    return false;
}
5649
/* Non-static wrapper for vm_defined() (used by JIT-compiled code). */
bool
rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    return vm_defined(ec, reg_cfp, op_type, obj, v);
}
5655
5656static const VALUE *
5657vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5658{
5659 rb_num_t i;
5660 const VALUE *ep = reg_ep;
5661 for (i = 0; i < lv; i++) {
5662 ep = GET_PREV_EP(ep);
5663 }
5664 return ep;
5665}
5666
/* Implement the `putspecialobject` instruction: return the VM-core object or
 * the (constant-)definition base for the given environment pointer. */
static VALUE
vm_get_special_object(const VALUE *const reg_ep,
                      enum vm_special_object_type type)
{
    switch (type) {
      case VM_SPECIAL_OBJECT_VMCORE:
        return rb_mRubyVMFrozenCore;
      case VM_SPECIAL_OBJECT_CBASE:
        return vm_get_cbase(reg_ep);
      case VM_SPECIAL_OBJECT_CONST_BASE:
        return vm_get_const_base(reg_ep);
      default:
        rb_bug("putspecialobject insn: unknown value_type %d", type);
    }
}
5682
5683// ZJIT implementation is using the C function
5684// and needs to call a non-static function
/* Non-static wrapper for vm_get_special_object() (see comment above). */
VALUE
rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
{
    return vm_get_special_object(reg_ep, type);
}
5690
5691static VALUE
5692vm_concat_array(VALUE ary1, VALUE ary2st)
5693{
5694 const VALUE ary2 = ary2st;
5695 VALUE tmp1 = rb_check_to_array(ary1);
5696 VALUE tmp2 = rb_check_to_array(ary2);
5697
5698 if (NIL_P(tmp1)) {
5699 tmp1 = rb_ary_new3(1, ary1);
5700 }
5701 if (tmp1 == ary1) {
5702 tmp1 = rb_ary_dup(ary1);
5703 }
5704
5705 if (NIL_P(tmp2)) {
5706 return rb_ary_push(tmp1, ary2);
5707 }
5708 else {
5709 return rb_ary_concat(tmp1, tmp2);
5710 }
5711}
5712
5713static VALUE
5714vm_concat_to_array(VALUE ary1, VALUE ary2st)
5715{
5716 /* ary1 must be a newly created array */
5717 const VALUE ary2 = ary2st;
5718
5719 if (NIL_P(ary2)) return ary1;
5720
5721 VALUE tmp2 = rb_check_to_array(ary2);
5722
5723 if (NIL_P(tmp2)) {
5724 return rb_ary_push(ary1, ary2);
5725 }
5726 else {
5727 return rb_ary_concat(ary1, tmp2);
5728 }
5729}
5730
5731// YJIT implementation is using the C function
5732// and needs to call a non-static function
/* Non-static wrapper for vm_concat_array() (see comment above). */
VALUE
rb_vm_concat_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_array(ary1, ary2st);
}
5738
/* Non-static wrapper for vm_concat_to_array() (used by JIT-compiled code). */
VALUE
rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_to_array(ary1, ary2st);
}
5744
5745static VALUE
5746vm_splat_array(VALUE flag, VALUE ary)
5747{
5748 if (NIL_P(ary)) {
5749 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5750 }
5751 VALUE tmp = rb_check_to_array(ary);
5752 if (NIL_P(tmp)) {
5753 return rb_ary_new3(1, ary);
5754 }
5755 else if (RTEST(flag)) {
5756 return rb_ary_dup(tmp);
5757 }
5758 else {
5759 return tmp;
5760 }
5761}
5762
5763// YJIT implementation is using the C function
5764// and needs to call a non-static function
/* Non-static wrapper for vm_splat_array() (see comment above). */
VALUE
rb_vm_splat_array(VALUE flag, VALUE ary)
{
    return vm_splat_array(flag, ary);
}
5770
5771static VALUE
5772vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5773{
5774 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5775
5776 if (flag & VM_CHECKMATCH_ARRAY) {
5777 long i;
5778 const long n = RARRAY_LEN(pattern);
5779
5780 for (i = 0; i < n; i++) {
5781 VALUE v = RARRAY_AREF(pattern, i);
5782 VALUE c = check_match(ec, v, target, type);
5783
5784 if (RTEST(c)) {
5785 return c;
5786 }
5787 }
5788 return Qfalse;
5789 }
5790 else {
5791 return check_match(ec, pattern, target, type);
5792 }
5793}
5794
/* Non-static wrapper for vm_check_match() (used by JIT-compiled code). */
VALUE
rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
{
    return vm_check_match(ec, target, pattern, flag);
}
5800
5801static VALUE
5802vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5803{
5804 const VALUE kw_bits = *(ep - bits);
5805
5806 if (FIXNUM_P(kw_bits)) {
5807 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5808 if ((idx < VM_KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5809 return Qfalse;
5810 }
5811 else {
5812 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5813 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5814 }
5815 return Qtrue;
5816}
5817
5818static void
5819vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5820{
5821 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5822 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5823 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5824 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5825
5826 switch (flag) {
5827 case RUBY_EVENT_CALL:
5828 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5829 return;
5830 case RUBY_EVENT_C_CALL:
5831 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5832 return;
5833 case RUBY_EVENT_RETURN:
5834 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5835 return;
5837 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5838 return;
5839 }
5840 }
5841}
5842
5843static VALUE
5844vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5845{
5846 if (!rb_const_defined_at(cbase, id)) {
5847 return 0;
5848 }
5849 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5850 return rb_public_const_get_at(cbase, id);
5851 }
5852 else {
5853 return rb_const_get_at(cbase, id);
5854 }
5855}
5856
5857static VALUE
5858vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5859{
5860 if (!RB_TYPE_P(klass, T_CLASS)) {
5861 return 0;
5862 }
5863 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5864 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5865
5866 if (tmp != super) {
5867 rb_raise(rb_eTypeError,
5868 "superclass mismatch for class %"PRIsVALUE"",
5869 rb_id2str(id));
5870 }
5871 else {
5872 return klass;
5873 }
5874 }
5875 else {
5876 return klass;
5877 }
5878}
5879
5880static VALUE
5881vm_check_if_module(ID id, VALUE mod)
5882{
5883 if (!RB_TYPE_P(mod, T_MODULE)) {
5884 return 0;
5885 }
5886 else {
5887 return mod;
5888 }
5889}
5890
/* Bind a freshly created class/module `c` to constant `id` under `cbase`,
 * assigning its permanent class path first. */
static VALUE
declare_under(ID id, VALUE cbase, VALUE c)
{
    rb_set_class_path_string(c, cbase, rb_id2str(id));
    rb_const_set(cbase, id, c);
    return c;
}
5898
5899static VALUE
5900vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5901{
5902 /* new class declaration */
5903 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5904 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5905 rb_class_inherited(s, c);
5906 return c;
5907}
5908
/* Declare a brand-new module as constant `id` under `cbase`. */
static VALUE
vm_declare_module(ID id, VALUE cbase)
{
    /* new module declaration */
    return declare_under(id, cbase, rb_module_new());
}
5915
5916NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5917static void
5918unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5919{
5920 VALUE name = rb_id2str(id);
5921 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5922 name, type);
5923 VALUE location = rb_const_source_location_at(cbase, id);
5924 if (!NIL_P(location)) {
5925 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5926 " previous definition of %"PRIsVALUE" was here",
5927 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5928 }
5930}
5931
/* Implement `class Foo [< Super]`: reuse an existing compatible class under
 * cbase or declare a new one.  Raises TypeError for a non-Class superclass or
 * a superclass/constant mismatch. */
static VALUE
vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    VALUE klass;

    if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
        rb_raise(rb_eTypeError,
                 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
                 rb_obj_class(super));
    }

    vm_check_if_namespace(cbase);

    /* find klass */
    rb_autoload_load(cbase, id);

    if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_class(id, flags, super, klass))
            unmatched_redefinition("class", cbase, id, klass);
        return klass;
    }
    else {
        return vm_declare_class(id, flags, cbase, super);
    }
}
5957
/* Implement `module Foo`: reuse an existing module under cbase or declare a
 * new one; raises via unmatched_redefinition() if the constant exists but is
 * not a module. */
static VALUE
vm_define_module(ID id, rb_num_t flags, VALUE cbase)
{
    VALUE mod;

    vm_check_if_namespace(cbase);
    if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_module(id, mod))
            unmatched_redefinition("module", cbase, id, mod);
        return mod;
    }
    else {
        return vm_declare_module(id, cbase);
    }
}
5973
/* Dispatch the `defineclass` instruction to the right flavor: class,
 * singleton class, or module definition. */
static VALUE
vm_find_or_create_class_by_id(ID id,
                              rb_num_t flags,
                              VALUE cbase,
                              VALUE super)
{
    rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);

    switch (type) {
      case VM_DEFINECLASS_TYPE_CLASS:
        /* classdef returns class scope value */
        return vm_define_class(id, flags, cbase, super);

      case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
        /* classdef returns class scope value */
        return rb_singleton_class(cbase);

      case VM_DEFINECLASS_TYPE_MODULE:
        /* classdef returns class scope value */
        return vm_define_module(id, flags, cbase);

      default:
        rb_bug("unknown defineclass type: %d", (int)type);
    }
}
5999
6000static rb_method_visibility_t
6001vm_scope_visibility_get(const rb_execution_context_t *ec)
6002{
6003 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6004
6005 if (!vm_env_cref_by_cref(cfp->ep)) {
6006 return METHOD_VISI_PUBLIC;
6007 }
6008 else {
6009 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6010 }
6011}
6012
6013static int
6014vm_scope_module_func_check(const rb_execution_context_t *ec)
6015{
6016 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6017
6018 if (!vm_env_cref_by_cref(cfp->ep)) {
6019 return FALSE;
6020 }
6021 else {
6022 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6023 }
6024}
6025
/* Implement `def` / `def self.` for an iseq body: choose the target class and
 * visibility, add the method, and handle `module_function` duplication. */
static void
vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
{
    VALUE klass;
    rb_method_visibility_t visi;
    rb_cref_t *cref = vm_ec_cref(ec);

    if (is_singleton) {
        klass = rb_singleton_class(obj); /* class and frozen checked in this API */
        visi = METHOD_VISI_PUBLIC;
    }
    else {
        klass = CREF_CLASS_FOR_DEFINITION(cref);
        visi = vm_scope_visibility_get(ec);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
    // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
    if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) &&
        !RCLASS_SINGLETON_P(klass) &&
        (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
    }

    // module_function: also define a public copy on the singleton class.
    if (!is_singleton && vm_scope_module_func_check(ec)) {
        klass = rb_singleton_class(klass);
        rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
    }
}
6059
6060// Return the untagged block handler:
6061// * If it's VM_BLOCK_HANDLER_NONE, return nil
6062// * If it's an ISEQ or an IFUNC, fetch it from its rb_captured_block
6063// * If it's a PROC or SYMBOL, return it as is
VALUE
rb_vm_untag_block_handler(VALUE block_handler)
{
    // See the comment above: map a tagged block handler to the underlying
    // object (nil / iseq or ifunc value / proc / symbol).
    if (VM_BLOCK_HANDLER_NONE == block_handler) return Qnil;

    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:
      case block_handler_type_ifunc: {
        // Strip the low tag bits to recover the captured-block pointer.
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        return captured->code.val;
      }
      case block_handler_type_proc:
      case block_handler_type_symbol:
        return block_handler;
      default:
        rb_bug("rb_vm_untag_block_handler: unreachable");
    }
}
6082
/* Convenience wrapper: untag the block handler of the given control frame. */
VALUE
rb_vm_get_untagged_block_handler(rb_control_frame_t *reg_cfp)
{
    return rb_vm_untag_block_handler(VM_CF_BLOCK_HANDLER(reg_cfp));
}
6088
/* Implement `yield`: invoke the block handler attached to the current frame;
 * error out when no block was given. */
static VALUE
vm_invokeblock_i(struct rb_execution_context_struct *ec,
                 struct rb_control_frame_struct *reg_cfp,
                 struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());

    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        // NOTE(review): presumably rb_vm_localjump_error raises and does not
        // return (the function body relies on it) — confirm its declaration.
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    else {
        return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
    }
}
6104
/* How vm_sendish() locates its callee: ordinary method search, block
 * invocation (yield), or `super` dispatch. */
enum method_explorer_type {
    mexp_search_method,
    mexp_search_invokeblock,
    mexp_search_super,
};
6110
/* Common tail of the send-family instructions: build the calling_info from
 * the call data and stack, resolve the callee per `method_explorer`, and run
 * the cached call handler.  Returns the call's result (Qundef when a new
 * Ruby frame was pushed and the interpreter must continue). */
static inline VALUE
vm_sendish(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *reg_cfp,
    struct rb_call_data *cd,
    VALUE block_handler,
    enum method_explorer_type method_explorer
) {
    VALUE val = Qundef;
    const struct rb_callinfo *ci = cd->ci;
    const struct rb_callcache *cc;
    int argc = vm_ci_argc(ci);
    VALUE recv = TOPN(argc);  // receiver sits below the arguments
    struct rb_calling_info calling = {
        .block_handler = block_handler,
        .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
        .recv = recv,
        .argc = argc,
        .cd = cd,
    };

    switch (method_explorer) {
      case mexp_search_method:
        calling.cc = cc = vm_search_method_fastpath(reg_cfp, cd, CLASS_OF(recv));
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_super:
        calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_invokeblock:
        val = vm_invokeblock_i(ec, GET_CFP(), &calling);
        break;
    }
    return val;
}
6147
/* JIT entry for the `send` instruction: set up the block argument, dispatch,
 * and run any pushed frame to completion via VM_EXEC. */
VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6157
6158// Fallback for YJIT/ZJIT, not used by the interpreter
/* Forwarding-send fallback (`...` argument forwarding): adjust the call data
 * to the forwarded arguments, dispatch, and persist a markable call cache
 * back into the original call data. */
VALUE
rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);

    // Propagate a newly filled, GC-markable cache entry back to `cd`.
    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6178
/* JIT entry for `opt_send_without_block`: like rb_vm_send() but with no block
 * handler. */
VALUE
rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6188
/* JIT entry for the `invokesuper` instruction: dispatch through the super
 * method search and run any pushed frame to completion. */
VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);

    VM_EXEC(ec, val);
    return val;
}
6200
// Fallback for YJIT/ZJIT, not used by the interpreter
// Forwarding variant of rb_vm_invokesuper; mirrors rb_vm_sendforward but
// dispatches through the superclass search (mexp_search_super).
VALUE
rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    // Stack-allocated copies filled in by vm_caller_setup_fwd_args.
    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);

    // Persist the updated call cache (with write barrier) when markable.
    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6220
// Body of the `invokeblock` instruction: yield to the caller's block via
// the invokeblock search path; no literal block of its own.
VALUE
rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
    VM_EXEC(ec, val);
    return val;
}
6230
6231/* object.c */
6232VALUE rb_nil_to_s(VALUE);
6233VALUE rb_true_to_s(VALUE);
6234VALUE rb_false_to_s(VALUE);
6235/* numeric.c */
6236VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6237VALUE rb_fix_to_s(VALUE);
6238/* variable.c */
6239VALUE rb_mod_to_s(VALUE);
6241
/* Fast path for the objtostring instruction: convert recv for string
 * interpolation without a full method call when #to_s is known to still be
 * the default implementation. Returns Qundef to make the caller fall back
 * to a real #to_s dispatch. */
static VALUE
vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
{
    int type = TYPE(recv);
    if (type == T_STRING) {
        return recv;
    }

    /* Look up (and cache via cd) the #to_s method entry for recv's class. */
    const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);

    switch (type) {
      case T_SYMBOL:
        if (check_method_basic_definition(cme)) {
            // rb_sym_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            return rb_sym2str(recv);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (check_cfunc(cme, rb_mod_to_s)) {
            // rb_mod_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            VALUE val = rb_mod_name(recv);
            if (NIL_P(val)) {
                /* anonymous module/class: fall back to the default #to_s */
                val = rb_mod_to_s(recv);
            }
            return val;
        }
        break;
      case T_NIL:
        if (check_cfunc(cme, rb_nil_to_s)) {
            return rb_nil_to_s(recv);
        }
        break;
      case T_TRUE:
        if (check_cfunc(cme, rb_true_to_s)) {
            return rb_true_to_s(recv);
        }
        break;
      case T_FALSE:
        if (check_cfunc(cme, rb_false_to_s)) {
            return rb_false_to_s(recv);
        }
        break;
      case T_FIXNUM:
        /* Integer#to_s is rb_int_to_s; for a Fixnum receiver the
         * no-argument fast path rb_fix_to_s can be used directly. */
        if (check_cfunc(cme, rb_int_to_s)) {
            return rb_fix_to_s(recv);
        }
        break;
    }
    return Qundef;
}
6297
// ZJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
{
    /* Thin exported wrapper around the static fast path above. */
    return vm_objtostring(reg_cfp, recv, cd);
}
6305
6306static VALUE
6307vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6308{
6309 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6310 return ary;
6311 }
6312 else {
6313 return Qundef;
6314 }
6315}
6316
6317static VALUE
6318vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6319{
6320 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6321 return hash;
6322 }
6323 else {
6324 return Qundef;
6325 }
6326}
6327
6328static VALUE
6329vm_opt_str_freeze(VALUE str, int bop, ID id)
6330{
6331 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6332 return str;
6333 }
6334 else {
6335 return Qundef;
6336 }
6337}
6338
6339/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6340#define id_cmp idCmp
6341
6342static VALUE
6343vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6344{
6345 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6346 return rb_ary_includes(ary, target);
6347 }
6348 else {
6349 VALUE args[1] = {target};
6350
6351 // duparray
6352 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6353 VALUE dupary = rb_ary_resurrect(ary);
6354
6355 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6356 }
6357}
6358
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
{
    return vm_opt_duparray_include_p(ec, ary, target);
}
6364
6365static VALUE
6366vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6367{
6368 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6369 if (array_len == 0) {
6370 return Qnil;
6371 }
6372 else {
6373 VALUE result = *ptr;
6374 rb_snum_t i = array_len - 1;
6375 while (i-- > 0) {
6376 const VALUE v = *++ptr;
6377 if (OPTIMIZED_CMP(v, result) > 0) {
6378 result = v;
6379 }
6380 }
6381 return result;
6382 }
6383 }
6384 else {
6385 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6386 }
6387}
6388
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_max(ec, array_len, ptr);
}
6394
6395static VALUE
6396vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6397{
6398 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6399 if (array_len == 0) {
6400 return Qnil;
6401 }
6402 else {
6403 VALUE result = *ptr;
6404 rb_snum_t i = array_len - 1;
6405 while (i-- > 0) {
6406 const VALUE v = *++ptr;
6407 if (OPTIMIZED_CMP(v, result) < 0) {
6408 result = v;
6409 }
6410 }
6411 return result;
6412 }
6413 }
6414 else {
6415 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6416 }
6417}
6418
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_min(ec, array_len, ptr);
}
6424
6425static VALUE
6426vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6427{
6428 // If Array#hash is _not_ monkeypatched, use the optimized call
6429 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6430 return rb_ary_hash_values(array_len, ptr);
6431 }
6432 else {
6433 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6434 }
6435}
6436
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_hash(ec, array_len, ptr);
}
6442
6443VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6444VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6445
6446static VALUE
6447vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
6448{
6449 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6450 struct RArray fake_ary = {RBASIC_INIT};
6451 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
6452 return rb_ary_includes(ary, target);
6453 }
6454 else {
6455 VALUE args[1] = {target};
6456 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6457 }
6458}
6459
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
{
    return vm_opt_newarray_include_p(ec, array_len, ptr, target);
}
6465
/* opt_newarray_send(:pack) fast path: pack the operand slots through a
 * stack-local fake RArray when Array#pack keeps its basic definition.
 * `buffer` is Qundef when no buffer: keyword was given at the call site. */
static VALUE
vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
        struct RArray fake_ary = {RBASIC_INIT};
        VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
        return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
    }
    else {
        // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
        // Setup an array with room for keyword hash.
        VALUE args[2];
        args[0] = fmt;
        int kw_splat = RB_NO_KEYWORDS;
        int argc = 1;

        if (!UNDEF_P(buffer)) {
            /* re-create the `buffer:` keyword hash for the real call */
            args[1] = rb_hash_new_with_size(1);
            rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
            kw_splat = RB_PASS_KEYWORDS;
            argc++;
        }

        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idPack, argc, args, kw_splat);
    }
}
6492
/* Exported wrapper for JIT code; same contract as the static fast path. */
VALUE
rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
{
    return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, buffer);
}
6498
/* Pack without a buffer: keyword — Qundef marks "no buffer supplied". */
VALUE
rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt)
{
    return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, Qundef);
}
6504
6505#undef id_cmp
6506
/* Register inline cache `ic` under constant name `id` in the VM-global
 * constant cache table, creating the per-ID set on first use. Called with
 * the VM lock held (see vm_ic_track_const_chain). */
static void
vm_track_constant_cache(ID id, void *ic)
{
    rb_vm_t *vm = GET_VM();
    struct rb_id_table *const_cache = &vm->constant_cache;
    VALUE lookup_result;
    set_table *ics;

    if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
        ics = (set_table *)lookup_result;
    }
    else {
        ics = set_init_numtable();
        rb_id_table_insert(const_cache, id, (VALUE)ics);
    }

    /* The call below to st_insert could allocate which could trigger a GC.
     * If it triggers a GC, it may free an iseq that also holds a cache to this
     * constant. If that iseq is the last iseq with a cache to this constant, then
     * it will free this ST table, which would cause an use-after-free during this
     * st_insert.
     *
     * So to fix this issue, we store the ID that is currently being inserted
     * and, in remove_from_constant_cache, we don't free the ST table for ID
     * equal to this one.
     *
     * See [Bug #20921].
     */
    vm->inserting_constant_cache_id = id;

    set_insert(ics, (st_data_t)ic);

    /* clear the re-entrancy guard once the insert is complete */
    vm->inserting_constant_cache_id = (ID)0;
}
6541
/* Register `ic` for every ID in the NULL-terminated constant path
 * `segments` (skipping idNULL placeholders), under the VM lock, so the
 * cache can later be flushed when any segment constant changes
 * (see remove_from_constant_cache note in vm_track_constant_cache). */
static void
vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
{
    RB_VM_LOCKING() {
        for (int i = 0; segments[i]; i++) {
            ID id = segments[i];
            if (id == idNULL) continue;
            vm_track_constant_cache(id, ic);
        }
    }
}
6553
// For JIT inlining
/* Core constant-cache validity check: a cached value may be used when it is
 * ractor-shareable or we are on the main ractor, AND the cache either has no
 * CREF constraint or the current lexical CREF matches the cached one. */
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
{
    if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
        VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));

        return (ic_cref == NULL || // no need to check CREF
                ic_cref == vm_get_cref(reg_ep));
    }
    return false;
}
6566
/* Validity check for a concrete cache entry; forwards to the inlinable
 * predicate above after asserting the entry is an imemo_constcache. */
static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
    VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
}
6573
// YJIT needs this function to never allocate and never raise
bool
rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
    /* NULL entry means the cache was never filled (or was invalidated). */
    return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
}
6580
/* Fill inline cache `ic` with `val` for the getconstantpath instruction at
 * `pc`, recording the CREF constraint and shareability flag, and notify
 * YJIT so compiled code can pick up the new entry. A pending const_missing
 * invalidates the cache instead of filling it. */
static void
vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
{
    if (ruby_vm_const_missing_count > 0) {
        ruby_vm_const_missing_count = 0;
        ic->entry = NULL;
        return;
    }

    struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
    RB_OBJ_WRITE(ice, &ice->value, val);
    ice->ic_cref = vm_get_const_key_cref(reg_ep);

    if (rb_ractor_shareable_p(val)) {
        RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
        ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
    }
    /* write barrier: the iseq now references the new cache entry */
    RB_OBJ_WRITE(iseq, &ic->entry, ice);
    RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
    unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
    rb_yjit_constant_ic_update(iseq, ic, pos);
}
6603
/* Body of the getconstantpath instruction: return the cached constant value
 * on a cache hit; on a miss, resolve the constant path, register the cache
 * for invalidation tracking, and refill it. */
VALUE
rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
{
    VALUE val;
    const ID *segments = ic->segments;
    struct iseq_inline_constant_cache_entry *ice = ic->entry;

    if (ice && vm_ic_hit_p(ice, GET_EP())) {
        val = ice->value;

        /* debug builds re-resolve to verify the cache is still correct */
        VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
    }
    else {
        ruby_vm_constant_cache_misses++;
        val = vm_get_ev_const_chain(ec, segments);
        vm_ic_track_const_chain(GET_CFP(), ic, segments);
        // Undo the PC increment to get the address to this instruction
        // INSN_ATTR(width) == 2
        vm_ic_update(CFP_ISEQ(GET_CFP()), ic, val, GET_EP(), CFP_PC(GET_CFP()) - 2);
    }
    return val;
}
6626
6627static VALUE
6628vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6629{
6630 rb_thread_t *th = rb_ec_thread_ptr(ec);
6631 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6632
6633 again:
6634 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6635 return is->once.value;
6636 }
6637 else if (is->once.running_thread == NULL) {
6638 VALUE val;
6639 is->once.running_thread = th;
6640 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6641 // TODO: confirm that it is shareable
6642
6643 if (RB_FL_ABLE(val)) {
6644 RB_OBJ_SET_SHAREABLE(val);
6645 }
6646
6647 RB_OBJ_WRITE(CFP_ISEQ(ec->cfp), &is->once.value, val);
6648
6649 /* is->once.running_thread is cleared by vm_once_clear() */
6650 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6651 return val;
6652 }
6653 else if (is->once.running_thread == th) {
6654 /* recursive once */
6655 return vm_once_exec((VALUE)iseq);
6656 }
6657 else {
6658 /* waiting for finish */
6659 RUBY_VM_CHECK_INTS(ec);
6661 goto again;
6662 }
6663}
6664
/* Body of the opt_case_dispatch instruction: look up `key` in the compiled
 * CDHASH jump table. Returns the branch offset on a hit, `else_offset` on a
 * miss, and 0 when the fast path cannot be used (non-hashable literal type
 * or a redefined #=== on any of the involved core classes). */
static OFFSET
vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
{
    switch (OBJ_BUILTIN_TYPE(key)) {
      case -1: /* special const (fixnum, nil, true, false, ...) */
      case T_FLOAT:
      case T_SYMBOL:
      case T_BIGNUM:
      case T_STRING:
        if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
                                   SYMBOL_REDEFINED_OP_FLAG |
                                   INTEGER_REDEFINED_OP_FLAG |
                                   FLOAT_REDEFINED_OP_FLAG |
                                   NIL_REDEFINED_OP_FLAG |
                                   TRUE_REDEFINED_OP_FLAG |
                                   FALSE_REDEFINED_OP_FLAG |
                                   STRING_REDEFINED_OP_FLAG)) {
            st_data_t val;
            if (RB_FLOAT_TYPE_P(key)) {
                double kval = RFLOAT_VALUE(key);
                /* integral floats compare == to integers, so normalize the
                 * key to Integer to match how the table was built */
                if (!isinf(kval) && modf(kval, &kval) == 0.0) {
                    key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
                }
            }
            if (rb_hash_stlike_lookup(hash, key, &val)) {
                return FIX2LONG((VALUE)val);
            }
            else {
                return else_offset;
            }
        }
    }
    return 0;
}
6699
6700NORETURN(static void
6701 vm_stack_consistency_error(const rb_execution_context_t *ec,
6702 const rb_control_frame_t *,
6703 const VALUE *));
6704static void
6705vm_stack_consistency_error(const rb_execution_context_t *ec,
6706 const rb_control_frame_t *cfp,
6707 const VALUE *bp)
6708{
6709 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6710 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6711 static const char stack_consistency_error[] =
6712 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6713#if defined RUBY_DEVEL
6714 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6715 rb_str_cat_cstr(mesg, "\n");
6716 rb_str_append(mesg, rb_iseq_disasm(CFP_ISEQ(cfp)));
6718#else
6719 rb_bug(stack_consistency_error, nsp, nbp);
6720#endif
6721}
6722
/* opt_plus fast path. Check order matters: fixnums and flonums are special
 * consts, so they must be tested before the SPECIAL_CONST_P bail-out that
 * guards the RBASIC_CLASS checks below. Qundef means "do a real call". */
static VALUE
vm_opt_plus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_plus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             RBASIC_CLASS(obj) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_opt_plus(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             RBASIC_CLASS(obj) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_plus(recv, obj);
    }
    else {
        return Qundef;
    }
}
6756
/* opt_minus fast path for Fixnum and Float operands; same check ordering
 * rationale as vm_opt_plus. Qundef means "do a real call". */
static VALUE
vm_opt_minus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_minus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6780
/* opt_mult fast path for Fixnum and Float operands; same check ordering
 * rationale as vm_opt_plus. Qundef means "do a real call". */
static VALUE
vm_opt_mult(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_mul_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6804
/* opt_div fast path. Integer division by zero falls back to the real call
 * (Qundef) so the proper ZeroDivisionError is raised there. */
static VALUE
vm_opt_div(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else {
        return Qundef;
    }
}
6828
/* opt_mod fast path. Integer modulo by zero falls back to the real call
 * (Qundef) so the proper ZeroDivisionError is raised there. */
static VALUE
vm_opt_mod(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else {
        return Qundef;
    }
}
6852
/* opt_neq fast path: when #!= is still rb_obj_not_equal, compute the
 * optimized #== (via cd_eq) and negate it. Qundef means "do a real call". */
static VALUE
vm_opt_neq(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
{
    if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not_equal)) {
        VALUE val = opt_equality(reg_cfp, recv, obj, cd_eq);

        if (!UNDEF_P(val)) {
            return RBOOL(!RTEST(val));
        }
    }

    return Qundef;
}
6866
/* opt_lt fast path. Fixnum tagged words preserve ordering, so the raw
 * signed comparison of the VALUEs is the comparison of the integers. */
static VALUE
vm_opt_lt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6890
/* opt_le fast path; see vm_opt_lt for the tagged-word comparison trick. */
static VALUE
vm_opt_le(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6914
/* opt_gt fast path; see vm_opt_lt for the tagged-word comparison trick. */
static VALUE
vm_opt_gt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6938
/* opt_ge fast path; see vm_opt_lt for the tagged-word comparison trick. */
static VALUE
vm_opt_ge(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6962
6963
/* opt_ltlt fast path: String#<< (byte-append for String args, otherwise
 * rb_str_concat which handles codepoint semantics) and Array#<<. */
static VALUE
vm_opt_ltlt(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
        if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
            return rb_str_buf_append(recv, obj);
        }
        else {
            return rb_str_concat(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_push(recv, obj);
    }
    else {
        return Qundef;
    }
}
6987
6988static VALUE
6989vm_opt_and(VALUE recv, VALUE obj)
6990{
6991 // If recv and obj are both fixnums, then the bottom tag bit
6992 // will be 1 on both. 1 & 1 == 1, so the result value will also
6993 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6994 // will be 0, and we return Qundef.
6995 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6996
6997 if (FIXNUM_P(ret) &&
6998 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6999 return ret;
7000 }
7001 else {
7002 return Qundef;
7003 }
7004}
7005
7006static VALUE
7007vm_opt_or(VALUE recv, VALUE obj)
7008{
7009 if (FIXNUM_2_P(recv, obj) &&
7010 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
7011 return recv | obj;
7012 }
7013 else {
7014 return Qundef;
7015 }
7016}
7017
/* opt_aref fast path: Integer#[], Array#[] (integer index shortcut) and
 * Hash#[]. Qundef means "do a real call". */
static VALUE
vm_opt_aref(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        if (FIXNUM_2_P(recv, obj) &&
            BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
            return rb_fix_aref(recv, obj);
        }
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
        if (FIXNUM_P(obj)) {
            return rb_ary_entry_internal(recv, FIX2LONG(obj));
        }
        else {
            /* non-integer index (Range etc.): generic single-arg aref */
            return rb_ary_aref1(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
        return rb_hash_aref(recv, obj);
    }
    else {
        return Qundef;
    }
}
7045
/* opt_aset fast path: Array#[]= with an integer index and Hash#[]=.
 * Returns the stored value (aset semantics); Qundef means "do a real call". */
static VALUE
vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
             FIXNUM_P(obj)) {
        rb_ary_store(recv, FIX2LONG(obj), set);
        return set;
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, obj, set);
        return set;
    }
    else {
        return Qundef;
    }
}
7067
/* Shared fast path for length/size/empty? on String/Array/Hash. For
 * BOP_EMPTY_P the String byte length suffices (only compared against 0 by
 * vm_opt_empty_p); otherwise rb_str_length gives the character length. */
static VALUE
vm_opt_length(VALUE recv, int bop)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
        if (bop == BOP_EMPTY_P) {
            return LONG2NUM(RSTRING_LEN(recv));
        }
        else {
            return rb_str_length(recv);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
        return LONG2NUM(RARRAY_LEN(recv));
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
        return INT2FIX(RHASH_SIZE(recv));
    }
    else {
        return Qundef;
    }
}
7095
7096static VALUE
7097vm_opt_empty_p(VALUE recv)
7098{
7099 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7100 case Qundef: return Qundef;
7101 case INT2FIX(0): return Qtrue;
7102 default: return Qfalse;
7103 }
7104}
7105
7106VALUE rb_false(VALUE obj);
7107
7108static VALUE
7109vm_opt_nil_p(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
7110{
7111 if (NIL_P(recv) &&
7112 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7113 return Qtrue;
7114 }
7115 else if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_false)) {
7116 return Qfalse;
7117 }
7118 else {
7119 return Qundef;
7120 }
7121}
7122
/* Successor of a tagged Fixnum VALUE computed directly on the tagged word.
 * The two special cases handle the sign/overflow boundaries; the default
 * branch is derived below. (Hex comments illustrate the 32-bit layout.) */
static VALUE
fix_succ(VALUE x)
{
    switch (x) {
      case ~0UL:
        /* 0xFFFF_FFFF == INT2FIX(-1)
         * `-1.succ` is of course 0. */
        return INT2FIX(0);
      case RSHIFT(~0UL, 1):
        /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
         * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
        return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
      default:
        /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
         * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
         * == lx*2 + ly*2 + 1
         * == (lx*2+1) + (ly*2+1) - 1
         * == x + y - 1
         *
         * Here, if we put y := INT2FIX(1):
         *
         * == x + INT2FIX(1) - 1
         * == x + 2 .
         */
        return x + 2;
    }
}
7150
/* opt_succ fast path: Integer#succ via fix_succ and String#succ.
 * Qundef means "do a real call". */
static VALUE
vm_opt_succ(VALUE recv)
{
    if (FIXNUM_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
        return fix_succ(recv);
    }
    else if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_succ(recv);
    }
    else {
        return Qundef;
    }
}
7169
7170static VALUE
7171vm_opt_not(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
7172{
7173 if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not)) {
7174 return RBOOL(!RTEST(recv));
7175 }
7176 else {
7177 return Qundef;
7178 }
7179}
7180
/* opt_regexpmatch2 fast path: String =~ Regexp (argument order swapped for
 * rb_reg_match) and Regexp =~ obj. Qundef means "do a real call". */
static VALUE
vm_opt_regexpmatch2(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             CLASS_OF(obj) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
        return rb_reg_match(obj, recv);
    }
    else if (RBASIC_CLASS(recv) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
        return rb_reg_match(recv, obj);
    }
    else {
        return Qundef;
    }
}
7200
7201rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7202
7203NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7204
/* Fire one trace event (exactly one bit of pc_events selected by
 * target_event) against the global hook list and, if present, the
 * iseq/bmethod-local hook list. The PC is temporarily bumped around each
 * hook call because source lines are computed from PC-1. */
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
              rb_event_flag_t pc_events, rb_event_flag_t target_event,
              rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
{
    rb_event_flag_t event = pc_events & target_event;
    VALUE self = GET_SELF();

    VM_ASSERT(rb_popcount64((uint64_t)event) == 1);

    if (local_hooks) local_hooks->running++; // make sure they don't get deleted while global hooks run

    if (event & global_hooks->events) {
        /* increment PC because source line is calculated with PC-1 */
        reg_cfp->pc++;
        vm_dtrace(event, ec);
        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
        reg_cfp->pc--;
    }

    if (local_hooks) local_hooks->running--;
    if (local_hooks != NULL) {
        if (event & local_hooks->events) {
            /* increment PC because source line is calculated with PC-1 */
            reg_cfp->pc++;
            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
            reg_cfp->pc--;
        }
    }
}
7235
7236#define VM_TRACE_HOOK(target_event, val) do { \
7237 if ((pc_events & (target_event)) & enabled_flags) { \
7238 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
7239 } \
7240} while (0)
7241
/* Fetch $! for a rescue iseq: the raised exception is stored in the last
 * local-variable slot of a rescue frame's environment. */
static VALUE
rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
    VM_ASSERT(ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_RESCUE);
    return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
7249
7250static void
7251vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7252{
7253 const VALUE *pc = reg_cfp->pc;
7254 rb_ractor_t *r = rb_ec_ractor_ptr(ec);
7255 rb_event_flag_t enabled_flags = r->pub.hooks.events & ISEQ_TRACE_EVENTS;
7256 rb_event_flag_t ractor_events = enabled_flags;
7257
7258 if (enabled_flags == 0 && rb_ractor_targeted_hooks_cnt(r) == 0) {
7259 return;
7260 }
7261 else {
7262 const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
7263 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7264 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7265 unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
7266 rb_hook_list_t *local_hooks = NULL;
7267 if (RB_UNLIKELY(local_hooks_cnt > 0)) {
7268 st_data_t val;
7269 if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)iseq, &val)) {
7270 local_hooks = (rb_hook_list_t*)val;
7271 }
7272 }
7273 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7274
7275 rb_hook_list_t *bmethod_local_hooks = NULL;
7276 rb_event_flag_t bmethod_local_events = 0;
7277 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7278 enabled_flags |= iseq_local_events;
7279
7280 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7281
7282 if (bmethod_frame) {
7283 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7284 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7285 unsigned int bmethod_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
7286 if (RB_UNLIKELY(bmethod_hooks_cnt > 0)) {
7287 st_data_t val;
7288 if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)me->def, &val)) {
7289 bmethod_local_hooks = (rb_hook_list_t*)val;
7290 }
7291 if (bmethod_local_hooks) {
7292 bmethod_local_events = bmethod_local_hooks->events;
7293 }
7294 }
7295 }
7296
7297 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7298#if 0
7299 /* disable trace */
7300 /* TODO: incomplete */
7301 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7302#else
7303 /* do not disable trace because of performance problem
7304 * (re-enable overhead)
7305 */
7306#endif
7307 return;
7308 }
7309 else if (ec->trace_arg != NULL) {
7310 /* already tracing */
7311 return;
7312 }
7313 else {
7314 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7315 /* Note, not considering iseq local events here since the same
7316 * iseq could be used in multiple bmethods. */
7317 rb_event_flag_t bmethod_events = ractor_events | bmethod_local_events;
7318
7319 if (0) {
7320 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7321 (int)pos,
7322 (int)pc_events,
7323 RSTRING_PTR(rb_iseq_path(iseq)),
7324 (int)rb_iseq_line_no(iseq, pos),
7325 RSTRING_PTR(rb_iseq_label(iseq)));
7326 }
7327 VM_ASSERT(reg_cfp->pc == pc);
7328 VM_ASSERT(pc_events != 0);
7329
7330 /* check traces */
7331 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7332 /* b_call instruction running as a method. Fire call event. */
7333 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
7334 }
7336 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7337 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7338 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7339 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7340 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7341 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7342 /* b_return instruction running as a method. Fire return event. */
7343 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
7344 }
7345 }
7346 }
7347}
7348#undef VM_TRACE_HOOK
7349
7350#if VM_CHECK_MODE > 0
7351NORETURN( NOINLINE( COLDFUNC
7352void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7353
7354void
7355Init_vm_stack_canary(void)
7356{
7357 /* This has to be called _after_ our PRNG is properly set up. */
7358 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7359 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7360
7361 vm_stack_canary_was_born = true;
7362 VM_ASSERT(n == 0);
7363}
7364
7365void
7366rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7367{
7368 /* Because a method has already been called, why not call
7369 * another one. */
7370 const char *insn = rb_insns_name(i);
7371 VALUE inspection = rb_inspect(c);
7372 const char *str = StringValueCStr(inspection);
7373
7374 rb_bug("dead canary found at %s: %s", insn, str);
7375}
7376
7377#else
7378void Init_vm_stack_canary(void) { /* nothing to do */ }
7379#endif
7380
7381
7382/* a part of the following code is generated by this ruby script:
7383
738416.times{|i|
7385 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7386 typedef_args.prepend(", ") if i != 0
7387 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7388 call_args.prepend(", ") if i != 0
7389 puts %Q{
7390static VALUE
7391builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7392{
7393 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7394 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7395}}
7396}
7397
7398puts
7399puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
740016.times{|i|
7401 puts " builtin_invoker#{i},"
7402}
7403puts "};"
7404*/
7405
/* Generated invokers (see the generator script in the comment above).
 * builtin_invokerN casts `funcptr` to an N-positional-argument C function
 * and calls it with (ec, self, argv[0] .. argv[N-1]).  `argv` is unused
 * when N == 0.  Keep these in lockstep with the generator script. */
static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
7517
7518typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7519
7520static builtin_invoker
7521lookup_builtin_invoker(int argc)
7522{
7523 static const builtin_invoker invokers[] = {
7524 builtin_invoker0,
7525 builtin_invoker1,
7526 builtin_invoker2,
7527 builtin_invoker3,
7528 builtin_invoker4,
7529 builtin_invoker5,
7530 builtin_invoker6,
7531 builtin_invoker7,
7532 builtin_invoker8,
7533 builtin_invoker9,
7534 builtin_invoker10,
7535 builtin_invoker11,
7536 builtin_invoker12,
7537 builtin_invoker13,
7538 builtin_invoker14,
7539 builtin_invoker15,
7540 };
7541
7542 return invokers[argc];
7543}
7544
// Call builtin function `bf` with `argv` on behalf of the current frame.
// When the enclosing iseq is declared leaf (`Primitive.attr! :leaf`), a
// stack canary is planted around the call so debug builds can detect any
// VM stack use that would violate the leaf assumption.
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(CFP_ISEQ(reg_cfp))->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}
7555
7556static VALUE
7557vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7558{
7559 return invoke_bf(ec, cfp, bf, argv);
7560}
7561
7562static VALUE
7563vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7564{
7565 if (0) { // debug print
7566 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7567 for (int i=0; i<bf->argc; i++) {
7568 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(CFP_ISEQ(cfp))->local_table[i+start_index]));
7569 }
7570 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7571 (void *)(uintptr_t)bf->func_ptr);
7572 }
7573
7574 if (bf->argc == 0) {
7575 return invoke_bf(ec, cfp, bf, NULL);
7576 }
7577 else {
7578 const VALUE *argv = cfp->ep - ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7579 return invoke_bf(ec, cfp, bf, argv);
7580 }
7581}
7582
7583// for __builtin_inline!()
7584
7585VALUE
7586rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7587{
7588 const rb_control_frame_t *cfp = ec->cfp;
7589 return cfp->ep[index];
7590}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_EVENT_END
Encountered an end of a class clause.
Definition event.h:40
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
Definition event.h:43
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
Definition event.h:56
#define RUBY_EVENT_CLASS
Encountered a new class.
Definition event.h:39
#define RUBY_EVENT_LINE
Encountered a new line.
Definition event.h:38
#define RUBY_EVENT_RETURN
Encountered a return statement.
Definition event.h:42
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition event.h:44
#define RUBY_EVENT_B_CALL
Encountered an yield statement.
Definition event.h:55
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_RESCUE
Encountered a rescue statement.
Definition event.h:61
static bool RB_FL_ABLE(VALUE obj)
Checks if the object is flaggable.
Definition fl_type.h:381
VALUE rb_singleton_class(VALUE obj)
Finds or creates the singleton class of the passed object.
Definition class.c:2821
VALUE rb_module_new(void)
Creates a new, anonymous module.
Definition class.c:1591
VALUE rb_class_inherited(VALUE super, VALUE klass)
Calls Class::inherited.
Definition class.c:1488
VALUE rb_define_class_id(ID id, VALUE super)
This is a very badly designed API that creates an anonymous class.
Definition class.c:1467
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:108
#define RFLOAT_VALUE
Old name of rb_float_value.
Definition double.h:28
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define rb_ary_new4
Old name of rb_ary_new_from_values.
Definition array.h:659
#define FIXABLE
Old name of RB_FIXABLE.
Definition fixnum.h:25
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define STATIC_SYM_P
Old name of RB_STATIC_SYM_P.
#define ASSUME
Old name of RBIMPL_ASSUME.
Definition assume.h:27
#define FIX2ULONG
Old name of RB_FIX2ULONG.
Definition long.h:47
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define rb_exc_new3
Old name of rb_exc_new_str.
Definition error.h:38
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define DBL2NUM
Old name of rb_float_new.
Definition double.h:29
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
Definition fl_type.h:67
#define FL_SET_RAW
Old name of RB_FL_SET_RAW.
Definition fl_type.h:126
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void rb_notimplement(void)
Definition error.c:3898
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:661
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:476
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
VALUE rb_eFatal
fatal exception.
Definition error.c:1423
VALUE rb_eNoMethodError
NoMethodError exception.
Definition error.c:1435
void rb_exc_fatal(VALUE mesg)
Raises a fatal error in the current thread.
Definition eval.c:674
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1425
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
void rb_error_frozen_object(VALUE frozen_obj)
Identical to rb_error_frozen(), except it takes arbitrary Ruby object instead of C's string.
Definition error.c:4219
VALUE rb_exc_new_str(VALUE etype, VALUE str)
Identical to rb_exc_new_cstr(), except it takes a Ruby's string instead of C's.
Definition error.c:1478
@ RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK
Warning is for checking unused block strictly.
Definition error.h:57
VALUE rb_cClass
Class class.
Definition object.c:63
VALUE rb_cArray
Array class.
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2254
VALUE rb_cRegexp
Regexp class.
Definition re.c:2650
VALUE rb_obj_frozen_p(VALUE obj)
Just calls RB_OBJ_FROZEN() inside.
Definition object.c:1325
VALUE rb_cHash
Hash class.
Definition hash.c:109
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:235
VALUE rb_inspect(VALUE obj)
Generates a human-readable textual representation of the given object.
Definition object.c:657
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:59
VALUE rb_cModule
Module class.
Definition object.c:62
VALUE rb_class_real(VALUE klass)
Finds a "real" class.
Definition object.c:226
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:894
VALUE rb_cFloat
Float class.
Definition numeric.c:198
VALUE rb_cProc
Proc class.
Definition proc.c:45
VALUE rb_cString
String class.
Definition string.c:84
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition gc.h:468
#define RB_OBJ_WRITE(old, slot, young)
Declaration of a "back" pointer.
Definition gc.h:456
VALUE rb_ary_concat(VALUE lhs, VALUE rhs)
Destructively appends the contents of latter into the end of former.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_resurrect(VALUE ary)
I guess there is no use case of this function in extension libraries, but this is a routine identical...
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_includes(VALUE ary, VALUE elem)
Queries if the passed array has the passed entry.
VALUE rb_ary_plus(VALUE lhs, VALUE rhs)
Creates a new array, concatenating the former to the latter.
VALUE rb_ary_cat(VALUE ary, const VALUE *train, long len)
Destructively appends multiple elements at the end of the array.
VALUE rb_check_array_type(VALUE obj)
Try converting an object to its array representation using its to_ary method, if any.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
void rb_ary_store(VALUE ary, long key, VALUE val)
Destructively stores the passed value to the passed array's passed index.
#define UNLIMITED_ARGUMENTS
This macro is used in conjunction with rb_check_arity().
Definition error.h:35
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object,...
Definition proc.c:1169
VALUE rb_reg_last_match(VALUE md)
This just returns the argument, stringified.
Definition re.c:1935
VALUE rb_reg_match(VALUE re, VALUE str)
This is the match operator.
Definition re.c:3782
VALUE rb_reg_nth_match(int n, VALUE md)
Queries the nth captured substring.
Definition re.c:1910
VALUE rb_reg_match_post(VALUE md)
The portion of the original string after the given match.
Definition re.c:1992
VALUE rb_reg_nth_defined(int n, VALUE md)
Identical to rb_reg_nth_match(), except it just returns Boolean.
Definition re.c:1893
VALUE rb_reg_match_pre(VALUE md)
The portion of the original string before the given match.
Definition re.c:1959
VALUE rb_reg_match_last(VALUE md)
The portion of the original string that captured at the very last.
Definition re.c:2025
VALUE rb_str_append(VALUE dst, VALUE src)
Identical to rb_str_buf_append(), except it converts the right hand side before concatenating.
Definition string.c:3839
VALUE rb_str_succ(VALUE orig)
Searches for the "successor" of a string.
Definition string.c:5384
VALUE rb_str_buf_append(VALUE dst, VALUE src)
Identical to rb_str_cat_cstr(), except it takes Ruby's string instead of C's.
Definition string.c:3805
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:4076
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1657
VALUE rb_str_length(VALUE)
Identical to rb_str_strlen(), except it returns the value in rb_cInteger.
Definition string.c:2440
VALUE rb_str_intern(VALUE str)
Identical to rb_to_symbol(), except it assumes the receiver being an instance of RString.
Definition symbol.c:968
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1514
VALUE rb_const_get(VALUE space, ID name)
Identical to rb_const_defined(), except it returns the actual defined value.
Definition variable.c:3520
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:2047
void rb_cvar_set(VALUE klass, ID name, VALUE val)
Assigns a value to a class variable.
Definition variable.c:4307
VALUE rb_cvar_find(VALUE klass, ID name, VALUE *front)
Identical to rb_cvar_get(), except it takes additional "front" pointer.
Definition variable.c:4368
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1515
void rb_const_set(VALUE space, ID name, VALUE val)
Names a constant.
Definition variable.c:3998
VALUE rb_autoload_load(VALUE space, ID name)
Kicks the autoload procedure as if it was "touched".
Definition variable.c:3350
VALUE rb_mod_name(VALUE mod)
Queries the name of a module.
Definition variable.c:136
VALUE rb_const_get_at(VALUE space, ID name)
Identical to rb_const_defined_at(), except it returns the actual defined value.
Definition variable.c:3526
void rb_set_class_path_string(VALUE klass, VALUE space, VALUE name)
Identical to rb_set_class_path(), except it accepts the name as Ruby's string instead of C's.
Definition variable.c:423
VALUE rb_ivar_defined(VALUE obj, ID name)
Queries if the instance variable is defined at the object.
Definition variable.c:2126
int rb_const_defined_at(VALUE space, ID name)
Identical to rb_const_defined(), except it doesn't look for parent classes.
Definition variable.c:3858
VALUE rb_cvar_defined(VALUE klass, ID name)
Queries if the given class has the given class variable.
Definition variable.c:4391
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:380
int rb_const_defined(VALUE space, ID name)
Queries if the constant is defined at the namespace.
Definition variable.c:3852
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:689
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1740
int rb_method_boundp(VALUE klass, ID id, int ex)
Queries if the klass has this method.
Definition vm_method.c:2310
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1164
VALUE rb_sym2str(VALUE symbol)
Obtain a frozen string representation of a symbol (not including the leading colon).
Definition symbol.c:1024
int off
Offset inside of ptr.
Definition io.h:5
int len
Length of the buffer.
Definition io.h:8
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously classified as shareable or not.
Definition ractor.h:235
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:372
#define ALLOCA_N(type, n)
Definition memory.h:292
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define MEMMOVE(p1, p2, type, n)
Handy macro to call memmove.
Definition memory.h:384
VALUE type(ANYARGS)
ANYARGS-ed function type.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static VALUE * RARRAY_PTR(VALUE ary)
Wild use of a C pointer.
Definition rarray.h:366
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:69
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
static VALUE * ROBJECT_FIELDS(VALUE obj)
Queries the instance variables.
Definition robject.h:128
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:89
#define RB_PASS_KEYWORDS
Pass keywords, final argument must be a hash of keywords.
Definition scan_args.h:72
#define RB_NO_KEYWORDS
Do not pass keywords.
Definition scan_args.h:69
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
#define ANYARGS
Functions declared using this macro take arbitrary arguments, including void.
Definition stdarg.h:64
Ruby's array.
Definition rarray.h:128
const VALUE ary[1]
Embedded elements.
Definition rarray.h:188
const VALUE * ptr
Pointer to the C array that holds the elements of the array.
Definition rarray.h:175
Definition hash.h:53
Definition iseq.h:289
Definition vm_core.h:261
const ID * segments
A null-terminated list of ids, used to represent a constant's path idNULL is used to represent the ::...
Definition vm_core.h:285
Definition vm_core.h:293
Definition vm_core.h:288
Definition method.h:63
Definition constant.h:33
CREF (Class REFerence)
Definition method.h:45
Definition class.h:37
Definition method.h:55
rb_cref_t * cref
class reference, should be marked
Definition method.h:144
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:85
SVAR (Special VARiable)
Definition imemo.h:50
const VALUE cref_or_me
class reference or rb_method_entry_t
Definition imemo.h:52
THROW_DATA.
Definition imemo.h:59
Definition vm_core.h:297
intptr_t SIGNED_VALUE
A signed integer type that has the same width with VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_FLOAT_TYPE_P(VALUE obj)
Queries if the object is an instance of rb_cFloat.
Definition value_type.h:264
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376