Ruby 4.1.0dev (2026-04-17 revision 11e3c78b61da705c783dd12fb7f158c0d256ede0)
vm_insnhelper.c (11e3c78b61da705c783dd12fb7f158c0d256ede0)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions. Included into vm.c.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
51ruby_vm_special_exception_copy(VALUE exc)
52{
54 rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
/* Raise the pre-allocated SystemStackError on `ec`. When `setup` is true,
 * copy the shared template first and attach the current backtrace to it;
 * otherwise raise the bare template (cheaper, used in critical situations).
 * Never returns: longjmps to the enclosing tag. */
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        /* copy so the shared special exception stays pristine */
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
73
NORETURN(static void vm_stackoverflow(void));

/* Convenience wrapper: raise a SystemStackError (with backtrace) on the
 * current execution context. Never returns. */
static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}
81
/* Public entry for machine-stack overflow handling.
 * `crit` escalates the response: fatal levels raise the bare
 * stackfatal special exception without building a backtrace (we may not
 * have enough stack left to do so); otherwise fall back to
 * ec_stack_overflow, building a backtrace only below the signal level.
 * Raising during GC is unrecoverable, hence rb_bug. */
void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
99static int
100callable_class_p(VALUE klass)
101{
102#if VM_CHECK_MODE >= 2
103 if (!klass) return FALSE;
104 switch (RB_BUILTIN_TYPE(klass)) {
105 default:
106 break;
107 case T_ICLASS:
108 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
109 case T_MODULE:
110 return TRUE;
111 }
112 while (klass) {
113 if (klass == rb_cBasicObject) {
114 return TRUE;
115 }
116 klass = RCLASS_SUPER(klass);
117 }
118 return FALSE;
119#else
120 return klass != 0;
121#endif
122}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
/* Debug-only consistency check for one pushed frame.
 * Verifies, per frame magic, that:
 *   - specval holds a block handler exactly when the frame is LOCAL
 *     (req_block);
 *   - the ep[-2] slot (cref_or_me) holds a method entry when required
 *     (req_me, forced for bmethod frames), or a CREF/Qfalse otherwise;
 *   - any method entry found is "callable" (see callable_method_entry_p);
 *   - C-frame-ness matches whether iseq is a normal iseq (dummy frames are
 *     exempt: they may carry a dummy iseq used while loading).
 * Aborts via rb_bug on any violation. */
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                /* lambda/ifunc/dummy frames may legitimately carry a ment */
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) //argument error
                  );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}
201
/* Debug-only dispatcher: look up the per-magic requirements table and run
 * vm_check_frame_detail for the frame about to be pushed. Unknown magics
 * are a VM bug. */
static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
252
/* Debug-only leafness check: if the stack canary placed above the stack top
 * is still at sp[0], a supposedly-leaf instruction clobbered/used the stack.
 * Dumps the iseq name, disassembly, and the offending instruction, then
 * aborts. The canary is squashed first because the diagnostic path below
 * calls Ruby methods, which would re-trigger this check. */
void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch?  */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting this large contents.  It
       is designed to run form a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
305
#if USE_DEBUG_COUNTER
/* Bump debug counters for each frame push: total pushes, the
 * Ruby/C -> Ruby/C transition matrix (R2R/R2C/C2R/C2C), and a per-magic
 * counter. Compiled only when USE_DEBUG_COUNTER is enabled. */
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
// Return a poison value to be set above the stack top to verify leafness.
// Returns 0 (no canary) when VM_CHECK_MODE is disabled.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
/* Push a new control frame onto ec's VM stack.
 *
 * Layout built at `sp` (growing upward):
 *   locals[local_size] = Qnil, then the three env-data slots:
 *   ep[-2] = cref_or_me, ep[-1] = specval (block handler / prev ep),
 *   ep[0]  = type (ENV_FLAGS).
 * The new cfp is initialized with its sp/ep pointing past these slots, and
 * only then is ec->cfp published — the signal fence keeps that ordering
 * visible to async-signal code inspecting ec->cfp. */
static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        ._iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
444/* return TRUE if the frame is finished */
/* return TRUE if the frame is finished */
/* Pop `cfp`, first servicing pending interrupts; the FINISH flag is read
 * from the env *before* popping, since the slot may be reused afterwards. */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2)       SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}
457
458void
459rb_vm_pop_frame(rb_execution_context_t *ec)
460{
461 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
462}
463
// it pushes pseudo-frame with fname filename.
// Used while loading a file so backtraces show `fname` before any real
// iseq exists; returns the dummy iseq (as VALUE) so the caller can keep it
// alive / pop the frame later.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
502
/* Raise the ArgumentError built by rb_arity_error_new. Does not return. */
void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}
508
509/* lvar */
510
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

/* Slow path for writing into an escaped env: register the env with the GC
 * write-barrier ("remember" it) so the store is safe, then clear the
 * WB_REQUIRED flag so subsequent writes take the fast path. */
static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcely */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
535
/* Public wrapper around vm_env_write (used by JITs / other TUs). */
void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
541
/* Convert a block handler to a Proc object (or nil when there is no block).
 * iseq/ifunc handlers are materialized via rb_vm_make_proc; a Symbol
 * handler becomes its to_proc; a proc handler is returned as-is. */
VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
562
563/* svar */
564
#if VM_CHECK_MODE > 0
/* Debug-only check: the svar slot may only hold an svar, cref, or ment
 * imemo. Anything else aborts via rb_bug. */
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        const enum imemo_type t = imemo_type(svar);

        if (t == imemo_svar || t == imemo_cref || t == imemo_ment) {
            return TRUE;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif
583
/* Fetch the svar slot for the local env `lep`: for a normal frame it lives
 * in the ME/CREF env slot; for the root frame (or when ec is NULL-guarded
 * out) it is ec->root_svar. May return Qfalse cast to the pointer type when
 * no svar has been created yet. */
static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}
600
/* Store `svar` into the slot read by lep_svar, using the write-barrier-aware
 * env write for frame slots and RB_OBJ_WRITE for the root slot. */
static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}
613
/* Read one special variable ($_, $~, or an "extra" slot) from the env's
 * svar. Returns Qnil when no svar exists (the slot may instead hold a
 * cref/ment, which also means "no svar"). */
static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}
638
/* Allocate a fresh svar imemo with all fields nil. The casts strip the
 * const qualifier from the freshly-allocated fields for initialization
 * (no write barrier needed: the object is new and the values are nil). */
static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}
649
/* Set one special variable in the env's svar, creating the svar on demand
 * (the previous slot value — a cref/ment or Qfalse — is preserved inside
 * the new svar as its cref_or_me). */
static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
676
/* Read a special variable for the getspecial instruction.
 * type == 0: plain svar lookup by key.
 * type odd:  (type >> 1) is a character selecting a $~-derived value
 *            ($&, $`, $', $+).
 * type even: (type >> 1) is an nth-group back-reference ($1, $2, ...). */
static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
712
/* `defined?` support for back-references: same encoding of `type` as
 * vm_getspecial, but only tests whether the corresponding capture exists
 * ($&, $`, $' reduce to group 0). */
static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758 rb_bug("check_method_entry: svar should not be there:");
759#endif
760 return NULL;
761 }
762}
763
765env_method_entry_unchecked(VALUE obj, int can_be_svar)
766{
767 if (obj == Qfalse) return NULL;
768
769 switch (imemo_type(obj)) {
770 case imemo_ment:
771 return (rb_callable_method_entry_t *)obj;
772 case imemo_cref:
773 return NULL;
774 case imemo_svar:
775 if (can_be_svar) {
776 return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
777 }
778 default:
779 return NULL;
780 }
781}
782
784rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
785{
786 const VALUE *ep = cfp->ep;
788
789 while (!VM_ENV_LOCAL_P(ep)) {
790 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
791 ep = VM_ENV_PREV_EP(ep);
792 }
793
794 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
795}
796
798rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
799{
800 const VALUE *ep = cfp->ep;
802
803 while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
804 if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
805 ep = VM_ENV_PREV_EP_UNCHECKED(ep);
806 }
807
808 return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
809}
810
811static const rb_iseq_t *
812method_entry_iseqptr(const rb_callable_method_entry_t *me)
813{
814 switch (me->def->type) {
815 case VM_METHOD_TYPE_ISEQ:
816 return me->def->body.iseq.iseqptr;
817 default:
818 return NULL;
819 }
820}
821
822static rb_cref_t *
823method_entry_cref(const rb_callable_method_entry_t *me)
824{
825 switch (me->def->type) {
826 case VM_METHOD_TYPE_ISEQ:
827 return me->def->body.iseq.cref;
828 default:
829 return NULL;
830 }
831}
832
833#if VM_CHECK_MODE == 0
834PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
835#endif
836static rb_cref_t *
837check_cref(VALUE obj, int can_be_svar)
838{
839 if (obj == Qfalse) return NULL;
840
841#if VM_CHECK_MODE > 0
842 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
843#endif
844
845 switch (imemo_type(obj)) {
846 case imemo_ment:
847 return method_entry_cref((rb_callable_method_entry_t *)obj);
848 case imemo_cref:
849 return (rb_cref_t *)obj;
850 case imemo_svar:
851 if (can_be_svar) {
852 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
853 }
854 default:
855#if VM_CHECK_MODE > 0
856 rb_bug("check_method_entry: svar should not be there:");
857#endif
858 return NULL;
859 }
860}
861
862static inline rb_cref_t *
863vm_env_cref(const VALUE *ep)
864{
865 rb_cref_t *cref;
866
867 while (!VM_ENV_LOCAL_P(ep)) {
868 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
869 ep = VM_ENV_PREV_EP(ep);
870 }
871
872 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
873}
874
875static int
876is_cref(const VALUE v, int can_be_svar)
877{
878 if (RB_TYPE_P(v, T_IMEMO)) {
879 switch (imemo_type(v)) {
880 case imemo_cref:
881 return TRUE;
882 case imemo_svar:
883 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
884 default:
885 break;
886 }
887 }
888 return FALSE;
889}
890
891static int
892vm_env_cref_by_cref(const VALUE *ep)
893{
894 while (!VM_ENV_LOCAL_P(ep)) {
895 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
896 ep = VM_ENV_PREV_EP(ep);
897 }
898 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
899}
900
/* If *vptr holds a cref, replace it in place with a duplicate and return
 * the duplicate; recurse one level into an svar when allowed. `parent` is
 * the object owning the slot (for the GC write barrier); when the env has
 * not escaped there is no owner and VM_FORCE_WRITE is used instead.
 * A ment here is a caller bug (crefs reached via ments are not replaced). */
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}
932
/* Duplicate the cref visible from `ep` and install the copy back into the
 * env chain (so per-frame cref mutation does not leak into shared crefs).
 * Requires that the cref is stored as a literal cref on the chain
 * (vm_env_cref_by_cref); the envval is passed as write-barrier parent only
 * when the env has escaped to the heap. */
static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}
954
955static rb_cref_t *
956vm_get_cref(const VALUE *ep)
957{
958 rb_cref_t *cref = vm_env_cref(ep);
959
960 if (cref != NULL) {
961 return cref;
962 }
963 else {
964 rb_bug("vm_get_cref: unreachable");
965 }
966}
967
/* Public wrapper around vm_get_cref (used by other TUs / JITs). */
rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}
973
974static rb_cref_t *
975vm_ec_cref(const rb_execution_context_t *ec)
976{
977 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
978
979 if (cfp == NULL) {
980 return NULL;
981 }
982 return vm_get_cref(cfp->ep);
983}
984
985static const rb_cref_t *
986vm_get_const_key_cref(const VALUE *ep)
987{
988 const rb_cref_t *cref = vm_get_cref(ep);
989 const rb_cref_t *key_cref = cref;
990
991 while (cref) {
992 if (CREF_DYNAMIC(cref) ||
993 RCLASS_CLONED_P(CREF_CLASS(cref))) {
994 return key_cref;
995 }
996 cref = CREF_NEXT(cref);
997 }
998
999 /* no dynamic singleton class or cloned class found */
1000 return NULL;
1001}
1002
/* Rebuild a cref chain, substituting `new_klass` for the first link whose
 * class is `old_klass`; links before the match are shallow-copied, the
 * tail after the match is shared with the original chain. Returns the head
 * of the new chain (NULL for an empty input chain). */
rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    /* append new_cref to the copied chain, tracking head and tail */
    #define ADD_NEW_CREF(new_cref) \
      if (new_cref_tail) { \
        RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
      } \
      else { \
        new_cref_head = new_cref; \
      } \
      new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}
1035
1036static rb_cref_t *
1037vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1038{
1039 rb_cref_t *prev_cref = NULL;
1040
1041 if (ep) {
1042 prev_cref = vm_env_cref(ep);
1043 }
1044 else {
1045 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1046
1047 if (cfp) {
1048 prev_cref = vm_env_cref(cfp->ep);
1049 }
1050 }
1051
1052 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1053}
1054
/* The class/module currently open for definition at env `ep`. */
static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}
1062
1063static inline VALUE
1064vm_get_const_base(const VALUE *ep)
1065{
1066 const rb_cref_t *cref = vm_get_cref(ep);
1067
1068 while (cref) {
1069 if (!CREF_PUSHED_BY_EVAL(cref)) {
1070 return CREF_CLASS_FOR_DEFINITION(cref);
1071 }
1072 cref = CREF_NEXT(cref);
1073 }
1074
1075 return Qundef;
1076}
1077
1078static inline void
1079vm_check_if_namespace(VALUE klass)
1080{
1081 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1082 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1083 }
1084}
1085
/* Warn when a definition is about to land on a refinement module's outer
 * scope instead of the refinement itself (common user mistake). */
static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}
1093
/* Currently the identity function (cfp is unused); kept as a hook for
 * mapping a class to its iclass in a frame context. */
static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}
1099
/* Constant lookup for the VM.
 * With orig_klass nil and allow_nil (a relative reference like `Foo`),
 * search the lexical scope chain first — triggering autoload where an
 * undefined (autoload) entry is found, with `am` breaking autoload
 * retry loops — then fall back to self's class hierarchy. Otherwise
 * (`Mod::Foo`) search only orig_klass's public constants.
 * When is_defined, return a boolean-ish value for `defined?` instead of
 * the constant's value. Non-main ractors may only read shareable values. */
static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        /* undefined entry: an autoload stub */
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%"PRIsVALUE" by non-main ractor.", rb_class_path(klass), rb_id2str(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
1183
/* Exported entry point for constant lookup from JIT/compiled code.
 * `allow_nil == Qtrue` permits lookup relative to the current lexical
 * scope (cref) when `orig_klass` is nil. Not a `defined?` query (is_defined=0). */
VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}
1189
1190static inline VALUE
1191vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1192{
1193 VALUE val = Qnil;
1194 int idx = 0;
1195 int allow_nil = TRUE;
1196 if (segments[0] == idNULL) {
1197 val = rb_cObject;
1198 idx++;
1199 allow_nil = FALSE;
1200 }
1201 while (segments[idx]) {
1202 ID id = segments[idx++];
1203 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1204 allow_nil = FALSE;
1205 }
1206 return val;
1207}
1208
1209
/* Find the class that owns class-variable lookups for the current cref
 * chain: walk outward past singleton classes, eval-pushed crefs and
 * nil-class crefs until a real class/module scope is found.
 * Raises RuntimeError for toplevel access when `top_level_raise` is set,
 * and TypeError if no class is available. Never returns nil. */
static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    /* skip scopes that cannot own class variables */
    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
1235
1236ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1237static inline void
1238fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1239{
1240 if (is_attr) {
1241 vm_cc_attr_index_set(cc, index, shape_id);
1242 }
1243 else {
1244 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1245 }
1246}
1247
/* Incidental shareability check: when `cond` holds (the container is
 * shareable), the contained `val` must itself be Ractor-shareable. */
#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1252
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
/* Read instance variable `id` from `obj` using the shape-based inline
 * cache (`ic` for plain ivar reads, `cc` when called as attr_reader;
 * `is_attr` selects which). Returns `default_value` when the ivar is
 * unset; a `default_value` of Qundef marks a `defined?` query.
 * On a cache miss the cache is refilled with the object's current shape. */
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    /* special constants can never carry ivars */
    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    /* locate the object that actually stores the fields */
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        /* generic objects keep their fields in an external imemo */
        fields_obj = rb_obj_fields(obj, id);
    }

    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    /* fetch (shape, index) atomically from the relevant cache */
    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        /* cache hit: the cached index is valid for this shape */
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            /* too-complex shapes store fields in an st_table keyed by ID */
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                /* ivar not present in this shape: cache the negative result */
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    /* uncached fallback */
    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

/* Uncached ivar write: perform the full frozen check and shape
 * transition, then repopulate the inline cache (unless the resulting
 * shape is too complex to cache). Returns the assigned value. */
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    rb_check_frozen(obj);

    attr_index_t index = rb_ivar_set_index(obj, id, val);
    /* the write may have transitioned the object's shape */
    shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

    if (!rb_shape_too_complex_p(next_shape_id)) {
        populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
    return val;
#else
    return rb_ivar_set(obj, id, val);
#endif
}
1452
/* Slowpath entry for plain ivar writes (no call cache involved). */
static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}
1458
/* Slowpath entry for attr_writer-style writes (call cache only). */
static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}
1464
NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for T_CLASS/T_MODULE receivers, which keep their
 * fields in a separate fields object. Returns Qundef when the fast path
 * cannot be used (non-main ractor, no fields object, or cache miss with
 * no valid shape transition); the caller then falls back to the slowpath. */
static VALUE
vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    /* only the main ractor may set ivars on classes/modules */
    if (UNLIKELY(!rb_ractor_main_p())) {
        return Qundef;
    }

    VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
    if (UNLIKELY(!fields_obj)) {
        return Qundef;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        /* cache points at a direct child shape adding exactly this ivar */
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        /* commit the shape transition on both the class and its fields */
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1507
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for generic (non-T_OBJECT, non-class) receivers,
 * whose fields live in an external fields object. Returns Qundef on a
 * cache miss that is not a valid direct shape transition, so the caller
 * can fall back to the slowpath. */
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        /* cache points at a direct child shape adding exactly this ivar */
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        /* commit the shape transition on both objects */
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1543
/* Fast-path ivar write for T_OBJECT receivers using the cached
 * (dest_shape_id, index). Handles a cache hit (same shape) or a single
 * cached shape transition (direct child adding this ivar with unchanged
 * capacity). Returns Qundef for everything else; the caller dispatches
 * to vm_setivar_class/vm_setivar_default/slowpath. */
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            /* frozen shareable objects never take this write path */
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                /* cached transition: current shape must be the direct parent */
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
        /* fallthrough */
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
1596
/* Look up class variable `id` starting from `klass`, then refresh the
 * inline cache `ic` with the defining class's cvar cache entry so later
 * reads/writes can hit the fast path. Returns the cvar's value. */
static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    /* normalize iclasses back to the real module */
    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    /* entry is valid only while the global cvar state stays unchanged */
    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    /* write barriers: the iseq and class now reference the cref/class */
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}
1630
/* Read a class variable, using the inline cache when it is still valid
 * (same global cvar state, same cref, main ractor). On a miss, resolve
 * the cvar base class and refresh the cache. */
static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}
1650
/* Exported wrapper for JIT/compiled code. */
VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}
1656
/* Write a class variable, using the inline cache when it is still valid
 * (same global cvar state, same cref, main ractor). On a miss, resolve
 * the cvar base class, perform the write, and refresh the cache. */
static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}
1676
/* Exported wrapper for JIT/compiled code. */
void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}
1682
ALWAYS_INLINE(static VALUE vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic));
/* Instruction helper: read an ivar via the inline cache; unset ivars
 * yield nil (the default for the getinstancevariable instruction). */
static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}
1689
1690static inline void
1691vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1692{
1693 if (RB_SPECIAL_CONST_P(obj)) {
1695 return;
1696 }
1697
1698 shape_id_t dest_shape_id;
1699 attr_index_t index;
1700 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1701
1702 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1703 switch (BUILTIN_TYPE(obj)) {
1704 case T_OBJECT:
1705 break;
1706 case T_CLASS:
1707 case T_MODULE:
1708 if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
1709 return;
1710 }
1711 break;
1712 default:
1713 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1714 return;
1715 }
1716 }
1717 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1718 }
1719}
1720
/* Exported wrapper for JIT/compiled code. */
void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}
1726
/* Exported wrapper for JIT/compiled code. */
VALUE
rb_vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getinstancevariable(iseq, obj, id, ic);
}
1732
1733static VALUE
1734vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1735{
1736 /* continue throw */
1737
1738 if (FIXNUM_P(err)) {
1739 ec->tag->state = RUBY_TAG_FATAL;
1740 }
1741 else if (SYMBOL_P(err)) {
1742 ec->tag->state = TAG_THROW;
1743 }
1744 else if (THROW_DATA_P(err)) {
1745 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1746 }
1747 else {
1748 ec->tag->state = TAG_RAISE;
1749 }
1750 return err;
1751}
1752
/* Begin a new non-local jump (break/retry/return): locate the control
 * frame that should catch it and package the throw object.
 * Raises LocalJumpError when the target frame no longer exists (orphan
 * break, unexpected return). Returns a throw-data imemo recording the
 * value, the escape frame and the tag state. */
static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        /* climb out of nested blocks to the iseq the block belongs to */
        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = CFP_ISEQ(escape_cfp);
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(CFP_ISEQ(escape_cfp) == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            /* find the live frame owning `ep` and verify it has a
             * matching CATCH_TYPE_BREAK entry at the current pc */
            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = CFP_ISEQ(escape_cfp);
                    const VALUE epc = CFP_PC(escape_cfp) - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        /* walk the frame stack looking for the method (or lambda/top)
         * frame this `return` should unwind to */
        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        /* eval frames inherit toplevel-ness from the
                         * nearest non-rescue/ensure/eval ancestor */
                        const rb_iseq_t *is = CFP_ISEQ(escape_cfp);
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("isns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}
1939
1940static VALUE
1941vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1942 rb_num_t throw_state, VALUE throwobj)
1943{
1944 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1945 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1946
1947 if (state != 0) {
1948 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1949 }
1950 else {
1951 return vm_throw_continue(ec, throwobj);
1952 }
1953}
1954
/* Exported wrapper for JIT/compiled code. */
VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}
1960
/* Implement the expandarray instruction: push `num` elements of `ary`
 * onto the VM stack (padding with nil when the array is short), plus an
 * optional splat array of the remainder (flag bit 0). Flag bit 1 selects
 * "post" order (elements pushed last-to-first, splat holds the head).
 * A non-array value is treated as a one-element array after a failed
 * to_ary coercion. */
static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        /* not coercible: treat the original object as [obj] */
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            /* pad with nil for missing trailing elements */
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            /* splat receives the untaken head of the array */
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                /* splat receives the untaken tail of the array */
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            /* pad with nil for missing elements */
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    /* keep the coerced array alive while reading its raw pointer */
    RB_GC_GUARD(ary);
}
2031
2032static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2033
2034static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2035
/* Allocate a fresh call-cache entry set (ccs) for method `mid` with
 * callable method entry `cme`, and register it in `cc_tbl`. The cme is
 * marked as cached so invalidation can find it later. */
static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    int initial_capa = 2;
    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
#if VM_CHECK_MODE > 0
    /* sanity signature used by vm_ccs_p() checks */
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = initial_capa;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);

    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    /* write barrier: the table now references the cme */
    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
    return ccs;
}
2053
/* Append a (call-info, call-cache) pair to `ccs`, growing its storage
 * and re-registering the (possibly moved) ccs in the table when full.
 * Unmarkable caches are not retained. */
static void
vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        /* double capacity; realloc may move the ccs, so re-insert it */
        RUBY_ASSERT(ccs->capa > 0);
        ccs->capa *= 2;
        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
#if VM_CHECK_MODE > 0
        ccs->debug_sig = ~(VALUE)ccs;
#endif
        // GC?
        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    /* entries are matched later by (argc, flag) alone */
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}
2083
2084#if VM_CHECK_MODE > 0
/* Debug helper (VM_CHECK_MODE only): print every entry of a ccs. */
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i=0; i<ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}
2096
/* Debug helper (VM_CHECK_MODE only): assert every call cache in `ccs`
 * belongs to `klass`/`mid` and is still valid. Always returns TRUE. */
static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i=0; i<ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
2114#endif
2115
2116const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2117
/* Remove the (now invalidated) ccs for `mid` from the class's CC table.
 * Must hold the VM lock. With multiple ractors the table is replaced
 * copy-on-write, so concurrent readers never see a mutated table; the
 * work is skipped if another ractor already replaced the table/entry. */
static void
vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
{
    ASSERT_vm_locking();

    if (rb_multi_ractor_p()) {
        if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
            // Another ractor updated the CC table while we were waiting on the VM lock.
            // We have to retry.
            return;
        }

        VALUE ccs_obj = 0;
        rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
        struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;

        if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
            // Another ractor replaced that entry while we were waiting on the VM lock.
            return;
        }

        /* copy-on-write: never mutate a table other ractors may be reading */
        VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
        rb_vm_cc_table_delete(new_table, mid);
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
    }
    else {
        /* single ractor: in-place delete is safe */
        rb_vm_cc_table_delete(cc_tbl, mid);
    }
}
2147
/* Create and register a call cache for (klass, ci) under the VM lock.
 * Resolves the callable method entry; an undefined/missing method yields
 * the shared empty cc. With multiple ractors the CC table is updated
 * copy-on-write and swapped in atomically at the end. */
static const struct rb_callcache *
vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    ASSERT_vm_locking();

    RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);

    const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);

    VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));

    if (cme == NULL) {
        // undef or not found: can't cache the information
        VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
        return &vm_empty_cc;
    }

    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
    const VALUE original_cc_table = cc_tbl;
    if (!cc_tbl) {
        // Is this possible after rb_callable_method_entry ?
        cc_tbl = rb_vm_cc_table_create(1);
    }
    else if (rb_multi_ractor_p()) {
        /* copy-on-write under multi-ractor execution */
        cc_tbl = rb_vm_cc_table_dup(cc_tbl);
    }

    VM_ASSERT(cme == rb_callable_method_entry(klass, mid));

    METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);

    VM_ASSERT(cc_tbl);

    /* find or create the ccs bucket for this method id */
    struct rb_class_cc_entries *ccs = NULL;
    {
        VALUE ccs_obj;
        if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
        }
        else {
            // TODO: required?
            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
        }
    }

    /* pick the right overload for this call site's arguments */
    cme = rb_check_overloaded_cme(cme, ci);

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
    vm_ccs_push(cc_tbl, mid, ccs, ci, cc);

    VM_ASSERT(vm_cc_cme(cc) != NULL);
    VM_ASSERT(cme->called_id == mid);
    VM_ASSERT(vm_cc_cme(cc)->called_id == mid);

    if (original_cc_table != cc_tbl) {
        /* publish the new/duplicated table atomically */
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
    }

    return cc;
}
2208
/* Lock-free lookup of an existing call cache for (klass, ci).
 * Returns NULL when none matches. When the cached cme has been
 * invalidated, the stale entry is evicted under the VM lock and the
 * lookup retries against the (possibly replaced) table. */
static const struct rb_callcache *
vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    VALUE cc_tbl;
    struct rb_class_cc_entries *ccs;
  retry:
    cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
    ccs = NULL;

    if (cc_tbl) {
        // CCS data is keyed on method id, so we don't need the method id
        // for doing comparisons in the `for` loop below.

        VALUE ccs_obj;
        if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
            const int ccs_len = ccs->len;

            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
                /* stale entry: evict under the lock and retry */
                RB_VM_LOCKING() {
                    vm_evict_cc(klass, cc_tbl, mid);
                }
                goto retry;
            }
            else {
                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));

                // We already know the method id is correct because we had
                // to look up the ccs_data by method id. All we need to
                // compare is argc and flag
                unsigned int argc = vm_ci_argc(ci);
                unsigned int flag = vm_ci_flag(ci);

                for (int i=0; i<ccs_len; i++) {
                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;

                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));

                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);

                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
                        VM_ASSERT(ccs_cc->klass == klass);
                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));

                        return ccs_cc;
                    }
                }
            }
        }
    }

    /* keep the table alive across the raw pointer accesses above */
    RB_GC_GUARD(cc_tbl);
    return NULL;
}
2266
/* Find (or, failing that, build) the call cache for (klass, ci).
 * The lock-free lookup is tried first; on a miss the VM lock is taken
 * and, with multiple ractors, the lookup is retried in case another
 * ractor populated it while we waited. */
static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
{
    const ID mid = vm_ci_mid(ci);

    const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
    if (cc) {
        return cc;
    }

    RB_VM_LOCKING() {
        if (rb_multi_ractor_p()) {
            // The CC may have been populated by another ractor while we were waiting on the lock,
            // so we must lookup a second time.
            cc = vm_lookup_cc(klass, ci, mid);
        }

        if (!cc) {
            cc = vm_populate_cc(klass, ci, mid);
        }
    }

    return cc;
}
2291
/* Exported slow-path method search: resolve the call cache for `klass`
 * and verify (in VM_CHECK_MODE) that the result is consistent.
 * Always returns a non-NULL cc (possibly the shared empty cc). */
const struct rb_callcache *
rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
{
    const struct rb_callcache *cc;

    VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);

    cc = vm_search_cc(klass, ci);

    VM_ASSERT(cc);
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
    VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));

    return cc;
}
2310
// Run the slow-path method search for call data +cd+ and store the result
// back into cd->cc (the per-call-site inline cache). +cd_owner+ is the
// object owning +cd+ (typically the iseq); it receives a write barrier when
// a real (non-empty) callcache is stored. Returns the resolved callcache.
static const struct rb_callcache *
vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
#if USE_DEBUG_COUNTER
    const struct rb_callcache *old_cc = cd->cc;
#endif

    const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);

#if OPT_INLINE_METHOD_CACHE
    cd->cc = cc;

    const struct rb_callcache *empty_cc = &vm_empty_cc;
    if (cd_owner && cc != empty_cc) {
        // Notify the GC that cd_owner now references cc.
        RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
    }

#if USE_DEBUG_COUNTER
    // Classify this inline-cache miss for debug statistics.
    if (!old_cc || old_cc == empty_cc) {
        // empty
        RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
    }
    else if (old_cc == cc) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
    }
    else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
    }
    else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
             vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
    }
#endif
#endif // OPT_INLINE_METHOD_CACHE

    VM_ASSERT(vm_cc_cme(cc) == NULL ||
              vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));

    return cc;
}
2354
// Inline-cache hit test. If cd->cc matches +klass+ and its method entry has
// not been invalidated, return it directly; otherwise fall through to the
// slow path (which also refreshes the cache).
ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass));
static const struct rb_callcache *
vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass)
{
    const struct rb_callcache *cc = cd->cc;

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(vm_cc_class_check(cc, klass))) {
        if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
            VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
            RB_DEBUG_COUNTER_INC(mc_inline_hit);
            VM_ASSERT(vm_cc_cme(cc) == NULL ||                        // not found
                      (vm_ci_flag(cd->ci) & VM_CALL_SUPER) ||           // search_super w/ define_method
                      vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid

            return cc;
        }
        RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
    }
#endif

    // Miss: resolve via the slow path, passing the iseq as cache owner.
    return vm_search_method_slowpath0((VALUE)CFP_ISEQ(reg_cfp), cd, klass);
}
2381
2382static const struct rb_callable_method_entry_struct *
2383vm_search_method(struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
2384{
2385 VALUE klass = CLASS_OF(recv);
2386 VM_ASSERT(klass != Qfalse);
2387 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2388
2389 const struct rb_callcache *cc = vm_search_method_fastpath(reg_cfp, cd, klass);
2390 return vm_cc_cme(cc);
2391}
2392
2394rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2395{
2396 // Called from ZJIT with the compile-time iseq, which may differ from
2397 // the iseq on the current CFP. Use the slowpath to avoid stale caches.
2398 VALUE klass = CLASS_OF(recv);
2399 const struct rb_callcache *cc = vm_search_method_slowpath0(cd_owner, cd, klass);
2400 return vm_cc_cme(cc);
2401}
2402
2403#if __has_attribute(transparent_union)
2404typedef union {
2405 VALUE (*anyargs)(ANYARGS);
2406 VALUE (*f00)(VALUE);
2407 VALUE (*f01)(VALUE, VALUE);
2408 VALUE (*f02)(VALUE, VALUE, VALUE);
2409 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2410 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2411 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2412 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2413 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2422 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2423} __attribute__((__transparent_union__)) cfunc_type;
2424# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2425#else
2426typedef VALUE (*cfunc_type)(ANYARGS);
2427# define make_cfunc_type(f) (cfunc_type)(f)
2428#endif
2429
2430static inline int
2431check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2432{
2433 if (! me) {
2434 return false;
2435 }
2436 else {
2437 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2438 VM_ASSERT(callable_method_entry_p(me));
2439 VM_ASSERT(me->def);
2440 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2441 return false;
2442 }
2443 else {
2444#if __has_attribute(transparent_union)
2445 return me->def->body.cfunc.func == func.anyargs;
2446#else
2447 return me->def->body.cfunc.func == func;
2448#endif
2449 }
2450 }
2451}
2452
2453static inline int
2454check_method_basic_definition(const rb_callable_method_entry_t *me)
2455{
2456 return me && METHOD_ENTRY_BASIC(me);
2457}
2458
2459static inline int
2460vm_method_cfunc_is(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv, cfunc_type func)
2461{
2462 VM_ASSERT(reg_cfp != NULL);
2463 const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);
2464 return check_cfunc(cme, func);
2465}
2466
// ZJIT wrapper: is +me+ a CFUNC method implemented by +func+?
bool
rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
{
    return check_cfunc(me, func);
}
2472
2473int
2474rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2475{
2476 // Called from ZJIT with the compile-time iseq, which may differ from
2477 // the iseq on the current CFP. Use the slowpath to avoid stale caches.
2478 VALUE klass = CLASS_OF(recv);
2479 const struct rb_callcache *cc = vm_search_method_slowpath0((VALUE)iseq, cd, klass);
2480 const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc);
2481 return check_cfunc(cme, func);
2482}
2483
// From here on, calls to check_cfunc()/vm_method_cfunc_is() written with a
// plain function pointer are routed through make_cfunc_type(), so they work
// with the transparent-union variant above. The macros deliberately shadow
// the functions of the same name defined earlier.
#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
#define vm_method_cfunc_is(reg_cfp, cd, recv, func) vm_method_cfunc_is(reg_cfp, cd, recv, make_cfunc_type(func))

// True iff BOP_EQ (the == basic operation) is unredefined for type t
// (t is a redefined-flag prefix such as INTEGER, FLOAT, SYMBOL, STRING).
#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2488
2489static inline bool
2490FIXNUM_2_P(VALUE a, VALUE b)
2491{
2492 /* FIXNUM_P(a) && FIXNUM_P(b)
2493 * == ((a & 1) && (b & 1))
2494 * == a & b & 1 */
2495 SIGNED_VALUE x = a;
2496 SIGNED_VALUE y = b;
2497 SIGNED_VALUE z = x & y & 1;
2498 return z == 1;
2499}
2500
2501static inline bool
2502FLONUM_2_P(VALUE a, VALUE b)
2503{
2504#if USE_FLONUM
2505 /* FLONUM_P(a) && FLONUM_P(b)
2506 * == ((a & 3) == 2) && ((b & 3) == 2)
2507 * == ! ((a ^ 2) | (b ^ 2) & 3)
2508 */
2509 SIGNED_VALUE x = a;
2510 SIGNED_VALUE y = b;
2511 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2512 return !z;
2513#else
2514 return false;
2515#endif
2516}
2517
// Try to answer `recv == obj` without a method call. Each specialization is
// only taken when #== for that type has not been redefined
// (EQ_UNREDEFINED_P). Returns Qundef when no specialized path applies and
// the caller must perform a full dispatch.
static VALUE
opt_equality_specialized(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
        goto compare_by_identity;
    }
    else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
        goto compare_by_identity;
    }
    else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
        goto compare_by_identity;
    }
    else if (SPECIAL_CONST_P(recv)) {
        // other special constants: no specialization, fall through to Qundef
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
        double a = RFLOAT_VALUE(recv);
        double b = RFLOAT_VALUE(obj);

        // Numeric comparison (identity is wrong for heap Floats).
        return RBOOL(a == b);
    }
    else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
        if (recv == obj) {
            return Qtrue;
        }
        else if (RB_TYPE_P(obj, T_STRING)) {
            return rb_str_eql_internal(obj, recv);
        }
    }
    return Qundef;

  compare_by_identity:
    return RBOOL(recv == obj);
}
2552
2553static VALUE
2554opt_equality(struct rb_control_frame_struct *reg_cfp, VALUE recv, VALUE obj, CALL_DATA cd)
2555{
2556 VM_ASSERT(reg_cfp != NULL);
2557
2558 VALUE val = opt_equality_specialized(recv, obj);
2559 if (!UNDEF_P(val)) return val;
2560
2561 if (!vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_equal)) {
2562 return Qundef;
2563 }
2564 else {
2565 return RBOOL(recv == obj);
2566 }
2567}
2568
2569#undef EQ_UNREDEFINED_P
2570
2571static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2572NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2573
2574static VALUE
2575opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2576{
2577 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2578
2579 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2580 return RBOOL(recv == obj);
2581 }
2582 else {
2583 return Qundef;
2584 }
2585}
2586
2587static VALUE
2588opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2589{
2590 VALUE val = opt_equality_specialized(recv, obj);
2591 if (!UNDEF_P(val)) {
2592 return val;
2593 }
2594 else {
2595 return opt_equality_by_mid_slowpath(recv, obj, mid);
2596 }
2597}
2598
// Optimized obj1 == obj2. Returns Qtrue/Qfalse on the fast path, or Qundef
// when the caller must fall back to a normal method call.
VALUE
rb_equal_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEq);
}
2604
// Optimized obj1.eql?(obj2). Returns Qtrue/Qfalse on the fast path, or
// Qundef when the caller must fall back to a normal method call.
VALUE
rb_eql_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEqlP);
}
2610
2611extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2612extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2613
// Perform a match for `case/when`, `rescue`, or similar checks.
// WHEN: returns the pattern itself (the caller tests its truthiness).
// RESCUE: requires the pattern to be a class/module, then falls through.
// CASE (and RESCUE): dispatches pattern === target, honoring refinements.
static VALUE
check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through */
      case VM_CHECKMATCH_TYPE_CASE: {
        return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
2632
2633
2634static inline VALUE
2635double_cmp_lt(double a, double b)
2636{
2637 return RBOOL(a < b);
2638}
2639
2640static inline VALUE
2641double_cmp_le(double a, double b)
2642{
2643 return RBOOL(a <= b);
2644}
2645
2646static inline VALUE
2647double_cmp_gt(double a, double b)
2648{
2649 return RBOOL(a > b);
2650}
2651
2652static inline VALUE
2653double_cmp_ge(double a, double b)
2654{
2655 return RBOOL(a >= b);
2656}
2657
// Copied by vm_dump.c
// Compute the base pointer (start of the argument/local area) of a Ruby
// frame: previous frame's sp + local table size + env data. Returns NULL
// for non-Ruby frames.
static inline VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
    const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (CFP_ISEQ(cfp) && VM_FRAME_RUBYFRAME_P(cfp)) {
        VALUE *bp = prev_cfp->sp + ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size + VM_ENV_DATA_SIZE;

        if (ISEQ_BODY(CFP_ISEQ(cfp))->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
            // Forwardable iseqs store the caller's CI in a local; its argc
            // widens the frame by that many extra stack slots.
            int lts = ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size;
            int params = ISEQ_BODY(CFP_ISEQ(cfp))->param.size;

            CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
            bp += vm_ci_argc(ci);
        }

        if (ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
            /* adjust `self' */
            bp += 1;
        }
#if VM_DEBUG_BP_CHECK
        if (bp != cfp->bp_check) {
            ruby_debug_printf("bp_check: %ld, bp: %ld\n",
                              (long)(cfp->bp_check - GET_EC()->vm_stack),
                              (long)(bp - GET_EC()->vm_stack));
            rb_bug("vm_base_ptr: unreachable");
        }
#endif
        return bp;
    }
    else {
        return NULL;
    }
}
2693
// Public wrapper around vm_base_ptr() (used outside this file, e.g. vm_dump.c).
VALUE *
rb_vm_base_ptr(const rb_control_frame_t *cfp)
{
    return vm_base_ptr(cfp);
}
2699
2700/* method call processes with call_info */
2701
2702#include "vm_args.c"
2703
2704static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2705ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2706static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2707static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2708static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2709static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2710static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2711
2712static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2713
// Fastpath handler: tailcall iseq setup starting at opt_pc 0 (no optional
// parameters consumed).
static VALUE
vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
}
2721
2722static VALUE
2723vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2724{
2725 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2726
2727 const struct rb_callcache *cc = calling->cc;
2728 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2729 int param = ISEQ_BODY(iseq)->param.size;
2730 int local = ISEQ_BODY(iseq)->local_table_size;
2731 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2732}
2733
2734bool
2735rb_simple_iseq_p(const rb_iseq_t *iseq)
2736{
2737 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2738 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2739 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2740 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2741 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2742 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2743 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2744 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2745 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2746}
2747
2748bool
2749rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2750{
2751 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2752 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2753 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2754 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2757 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2758 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2759 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2760}
2761
2762bool
2763rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2764{
2765 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2766 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2767 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2768 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2769 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2770 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2771 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2772 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2773}
2774
2775#define ALLOW_HEAP_ARGV (-2)
2776#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2777
// Expand a splatted array +ary+ onto the VM stack (or into a hidden heap
// array when allowed and the expansion would overflow VM_ARGC_STACK_MAX).
// +max_args+ >= 0 limits how many elements are copied (non-lambda blocks);
// ALLOW_HEAP_ARGV / ALLOW_HEAP_ARGV_KEEP_KWSPLAT permit the heap fallback.
// Returns true iff the copy was truncated by max_args (caller must then
// re-check for a trailing keyword hash).
static inline bool
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
{
    vm_check_canary(GET_EC(), cfp->sp);
    bool ret = false;

    if (!NIL_P(ary)) {
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
        long len = RARRAY_LEN(ary);
        int argc = calling->argc;

        if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
            /* Avoid SystemStackError when splatting large arrays by storing arguments in
             * a temporary array, instead of trying to keeping arguments on the VM stack.
             */
            VALUE *argv = cfp->sp - argc;
            VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
            rb_ary_cat(argv_ary, argv, argc);
            rb_ary_cat(argv_ary, ptr, len);
            // Collapse the stacked args to a single slot holding argv_ary.
            cfp->sp -= argc - 1;
            cfp->sp[-1] = argv_ary;
            calling->argc = 1;
            calling->heap_argv = argv_ary;
            RB_GC_GUARD(ary);
        }
        else {
            long i;

            if (max_args >= 0 && len + argc > max_args) {
                /* If only a given max_args is allowed, copy up to max args.
                 * Used by vm_callee_setup_block_arg for non-lambda blocks,
                 * where additional arguments are ignored.
                 *
                 * Also, copy up to one more argument than the maximum,
                 * in case it is an empty keyword hash that will be removed.
                 */
                calling->argc += len - (max_args - argc + 1);
                len = max_args - argc + 1;
                ret = true;
            }
            else {
                /* Unset heap_argv if set originally. Can happen when
                 * forwarding modified arguments, where heap_argv was used
                 * originally, but heap_argv not supported by the forwarded
                 * method in all cases.
                 */
                calling->heap_argv = 0;
            }
            CHECK_VM_STACK_OVERFLOW(cfp, len);

            // Push the (possibly truncated) array elements onto the stack.
            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            calling->argc += i;
        }
    }

    return ret;
}
2837
// Collapse the kw_len keyword values on the stack (VM_CALL_KWARG style)
// into a single new hash keyed by the CI's keyword symbols, leaving that
// hash as the last argument and setting kw_splat (VM_CALL_KW_SPLAT style).
static inline void
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
{
    const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
    const int kw_len = vm_ci_kwarg(ci)->keyword_len;
    const VALUE h = rb_hash_new_with_size(kw_len);
    VALUE *sp = cfp->sp;
    int i;

    // The kw_len values sitting at sp - kw_len .. sp - 1 become hash values.
    for (i=0; i<kw_len; i++) {
        rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
    }
    (sp-kw_len)[0] = h;

    // kw_len stack slots are replaced by one hash slot.
    cfp->sp -= kw_len - 1;
    calling->argc -= kw_len - 1;
    calling->kw_splat = 1;
}
2856
2857static inline VALUE
2858vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2859{
2860 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2861 if (keyword_hash != Qnil) {
2862 /* Convert a non-hash keyword splat to a new hash */
2863 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2864 }
2865 }
2866 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2867 /* Convert a hash keyword splat to a new hash unless
2868 * a mutable keyword splat was passed.
2869 * Skip allocating new hash for empty keyword splat, as empty
2870 * keyword splat will be ignored by both callers.
2871 */
2872 keyword_hash = rb_hash_dup(keyword_hash);
2873 }
2874 return keyword_hash;
2875}
2876
// Normalize caller-side arguments on the VM stack before callee setup:
// expand array splats, normalize keyword splats, and convert VM_CALL_KWARG
// keyword lists into a keyword hash. +max_args+ limits splat expansion
// (see vm_caller_setup_arg_splat). Updates cfp->sp, calling->argc,
// calling->kw_splat, and calling->heap_argv in place.
static inline void
CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                 struct rb_calling_info *restrict calling,
                 const struct rb_callinfo *restrict ci, int max_args)
{
    if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
        if (IS_ARGS_KW_SPLAT(ci)) {
            // f(*a, **kw)
            VM_ASSERT(calling->kw_splat == 1);

            // Pop the splat array and the keyword splat from the stack.
            cfp->sp -= 2;
            calling->argc -= 2;
            VALUE ary = cfp->sp[0];
            VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);

            // splat a
            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;

            // put kw
            if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
                if (UNLIKELY(calling->heap_argv)) {
                    rb_ary_push(calling->heap_argv, kwh);
                    ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
                    if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
                        calling->kw_splat = 0;
                    }
                }
                else {
                    // Push the keyword hash back as a trailing argument.
                    cfp->sp[0] = kwh;
                    cfp->sp++;
                    calling->argc++;

                    VM_ASSERT(calling->kw_splat == 1);
                }
            }
            else {
                // Empty/nil keyword splat is dropped entirely.
                calling->kw_splat = 0;
            }
        }
        else {
            // f(*a)
            VM_ASSERT(calling->kw_splat == 0);

            cfp->sp -= 1;
            calling->argc -= 1;
            VALUE ary = cfp->sp[0];

            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
                goto check_keyword;
            }

            // check the last argument
            VALUE last_hash, argv_ary;
            if (UNLIKELY(argv_ary = calling->heap_argv)) {
                // A trailing hash flagged RHASH_PASS_AS_KEYWORDS becomes the
                // keyword splat (dup'd); an empty one is silently dropped.
                if (!IS_ARGS_KEYWORD(ci) &&
                    RARRAY_LEN(argv_ary) > 0 &&
                    RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    rb_ary_pop(argv_ary);
                    if (!RHASH_EMPTY_P(last_hash)) {
                        rb_ary_push(argv_ary, rb_hash_dup(last_hash));
                        calling->kw_splat = 1;
                    }
                }
            }
            else {
check_keyword:
                // Same trailing-hash handling, but on the VM stack.
                if (!IS_ARGS_KEYWORD(ci) &&
                    calling->argc > 0 &&
                    RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    if (RHASH_EMPTY_P(last_hash)) {
                        calling->argc--;
                        cfp->sp -= 1;
                    }
                    else {
                        cfp->sp[-1] = rb_hash_dup(last_hash);
                        calling->kw_splat = 1;
                    }
                }
            }
        }
    }
    else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
        // f(**kw)
        VM_ASSERT(calling->kw_splat == 1);
        VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);

        if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
            // Empty/nil keyword splat is dropped entirely.
            cfp->sp--;
            calling->argc--;
            calling->kw_splat = 0;
        }
        else {
            cfp->sp[-1] = kwh;
        }
    }
    else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
        // f(k1:1, k2:2)
        VM_ASSERT(calling->kw_splat == 0);

        /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
         * by creating a keyword hash.
         * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
         */
        vm_caller_setup_arg_kw(cfp, calling, ci);
    }
}
2987
// Optional development-only histogram of optional-parameter usage at call
// sites; disabled by default.
#define USE_OPT_HIST 0

#if USE_OPT_HIST
#define OPT_HIST_MAX 64
// Index: number of filled optional parameters; last slot collects overflow.
static int opt_hist[OPT_HIST_MAX+1];

// Dump the histogram when the process exits.
__attribute__((destructor))
static void
opt_hist_show_results_at_exit(void)
{
    for (int i=0; i<OPT_HIST_MAX; i++) {
        ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
    }
}
#endif
3003
// Fastpath handler for iseqs with only optional positional parameters:
// compute opt_pc from how many optionals the caller supplied and set up a
// normal frame. param size is reduced by the unsupplied optionals (delta).
static VALUE
vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                    struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;
    const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
    const int param = ISEQ_BODY(iseq)->param.size;
    const int local = ISEQ_BODY(iseq)->local_table_size;
    const int delta = opt_num - opt;

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    // NOTE(review): the guard tests opt_pc while the index is opt — looks
    // inconsistent; confirm opt <= OPT_HIST_MAX is implied before relying
    // on this debug-only histogram.
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
}
3031
// Tailcall variant of vm_call_iseq_setup_normal_opt_start: compute opt_pc
// from the supplied optionals and perform tailcall frame setup.
static VALUE
vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                      struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    // NOTE(review): guard tests opt_pc while indexing by opt — same
    // debug-only inconsistency as in vm_call_iseq_setup_normal_opt_start.
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
}
3055
3056static void
3057args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3058 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3059 VALUE *const locals);
3060
// Frame setup for forwardable iseqs (def m(...)): the frame is widened by
// the caller's argc, and the caller's CI is stored on the stack so the
// callee can forward it. (CI is expected to land in the last local slot.)
static VALUE
vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                         struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param_size = ISEQ_BODY(iseq)->param.size;
    int local_size = ISEQ_BODY(iseq)->local_table_size;

    // Setting up local size and param size
    VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);

    local_size = local_size + vm_ci_argc(calling->cd->ci);
    param_size = param_size + vm_ci_argc(calling->cd->ci);

    cfp->sp[0] = (VALUE)calling->cd->ci;

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
}
3080
// Fastpath handler for keyword-only-parameter iseqs called with literal
// keyword arguments (VM_CALL_KWARG): copy the passed keyword values aside,
// fill the callee's keyword locals, then set up a normal frame.
static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
    const int ci_kw_len = kw_arg->keyword_len;
    const VALUE * const ci_keywords = kw_arg->keywords;
    VALUE *argv = cfp->sp - calling->argc;
    // Keyword locals live just before the keyword-specified bits slot.
    VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    // Copy passed keyword values off the stack before rearranging locals.
    VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
    MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
    args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3107
// Fastpath handler for keyword-only-parameter iseqs called with no keyword
// arguments at all: fill every keyword local with its default value and
// clear the "keyword specified" flag, then set up a normal frame.
static VALUE
vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                  struct rb_calling_info *calling)
{
    const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    VALUE * const argv = cfp->sp - calling->argc;
    // Keyword locals live just before the keyword-specified bits slot.
    VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;

    int i;
    for (i=0; i<kw_param->num; i++) {
        klocals[i] = kw_param->default_values[i];
    }
    klocals[i] = INT2FIX(0); // kw specify flag
    // NOTE:
    //   nobody check this value, but it should be cleared because it can
    //   points invalid VALUE (T_NONE objects, raw pointer and so on).

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3136
3137static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3138
3139static VALUE
3140vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3141 struct rb_calling_info *calling)
3142{
3143 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3144 cfp->sp -= (calling->argc + 1);
3145 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3146 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3147}
3148
3149VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3150
// Emit a "block may be ignored" warning for a call that passes a block to a
// method that never uses one, deduplicated via a process-wide table.
// Relaxed mode dedups per method id; strict mode (the strict_unused_block
// warning category) dedups per (call-site pc, method definition) pair.
static void
warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
{
    rb_vm_t *vm = GET_VM();
    set_table *dup_check_table = &vm->unused_block_warning_table;
    st_data_t key;
    bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);

    // Byte views of the pc and the method definition pointer, used to mix
    // them into a single dedup key below.
    union {
        VALUE v;
        unsigned char b[SIZEOF_VALUE];
    } k1 = {
        .v = (VALUE)pc,
    }, k2 = {
        .v = (VALUE)cme->def,
    };

    // relax check
    if (!strict_unused_block) {
        key = (st_data_t)cme->def->original_id;

        if (set_table_lookup(dup_check_table, key)) {
            return;
        }
    }

    // strict check
    // make unique key from pc and me->def pointer
    key = 0;
    for (int i=0; i<SIZEOF_VALUE; i++) {
        // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
        key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
    }

    // Debug dump of the key construction (disabled).
    if (0) {
        fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
        fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
        fprintf(stderr, "key:%p\n", (void *)key);
    }

    // duplication check
    if (set_insert(dup_check_table, key)) {
        // already shown
    }
    else if (RTEST(ruby_verbose) || strict_unused_block) {
        VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
        VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);

        if (!NIL_P(m_loc)) {
            rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
                    name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
        }
        else {
            rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
        }
    }
}
3208
3209static inline int
3210vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3211 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3212{
3213 const struct rb_callinfo *ci = calling->cd->ci;
3214 const struct rb_callcache *cc = calling->cc;
3215
3216 VM_ASSERT((vm_ci_argc(ci), 1));
3217 VM_ASSERT(vm_cc_cme(cc) != NULL);
3218
3219 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3220 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3221 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3222 warn_unused_block(vm_cc_cme(cc), iseq, (void *)CFP_PC(ec->cfp));
3223 }
3224
3225 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3226 if (LIKELY(rb_simple_iseq_p(iseq))) {
3227 rb_control_frame_t *cfp = ec->cfp;
3228 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3229 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3230
3231 if (calling->argc != lead_num) {
3232 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3233 }
3234
3235 //VM_ASSERT(ci == calling->cd->ci);
3236 VM_ASSERT(cc == calling->cc);
3237
3238 if (vm_call_iseq_optimizable_p(ci, cc)) {
3239 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) && ruby_vm_c_events_enabled == 0) {
3240 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3241 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3242 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3243 }
3244 else {
3245 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3246 }
3247 }
3248 return 0;
3249 }
3250 else if (rb_iseq_only_optparam_p(iseq)) {
3251 rb_control_frame_t *cfp = ec->cfp;
3252
3253 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3254 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3255
3256 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3257 const int argc = calling->argc;
3258 const int opt = argc - lead_num;
3259
3260 if (opt < 0 || opt > opt_num) {
3261 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3262 }
3263
3264 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3265 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3266 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3267 vm_call_cacheable(ci, cc));
3268 }
3269 else {
3270 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3271 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3272 vm_call_cacheable(ci, cc));
3273 }
3274
3275 /* initialize opt vars for self-references */
3276 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3277 for (int i=argc; i<lead_num + opt_num; i++) {
3278 argv[i] = Qnil;
3279 }
3280 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3281 }
3282 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3283 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3284 const int argc = calling->argc;
3285 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3286
3287 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3288 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3289
3290 if (argc - kw_arg->keyword_len == lead_num) {
3291 const int ci_kw_len = kw_arg->keyword_len;
3292 const VALUE * const ci_keywords = kw_arg->keywords;
3293 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3294 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3295
3296 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3297 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3298
3299 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3300 vm_call_cacheable(ci, cc));
3301
3302 return 0;
3303 }
3304 }
3305 else if (argc == lead_num) {
3306 /* no kwarg */
3307 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3308 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3309
3310 if (klocals[kw_param->num] == INT2FIX(0)) {
3311 /* copy from default_values */
3312 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3313 vm_call_cacheable(ci, cc));
3314 }
3315
3316 return 0;
3317 }
3318 }
3319 }
3320
3321 // Called iseq is using ... param
3322 // def foo(...) # <- iseq for foo will have "forwardable"
3323 //
3324 // We want to set the `...` local to the caller's CI
3325 // foo(1, 2) # <- the ci for this should end up as `...`
3326 //
3327 // So hopefully the stack looks like:
3328 //
3329 // => 1
3330 // => 2
3331 // => *
3332 // => **
3333 // => &
3334 // => ... # <- points at `foo`s CI
3335 // => cref_or_me
3336 // => specval
3337 // => type
3338 //
3339 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3340 bool can_fastpath = true;
3341
3342 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3343 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3344 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3345 ci = vm_ci_new_runtime(
3346 vm_ci_mid(ci),
3347 vm_ci_flag(ci),
3348 vm_ci_argc(ci),
3349 vm_ci_kwarg(ci));
3350 }
3351 else {
3352 ci = forward_cd->caller_ci;
3353 }
3354 can_fastpath = false;
3355 }
3356 // C functions calling iseqs will stack allocate a CI,
3357 // so we need to convert it to heap allocated
3358 if (!vm_ci_markable(ci)) {
3359 ci = vm_ci_new_runtime(
3360 vm_ci_mid(ci),
3361 vm_ci_flag(ci),
3362 vm_ci_argc(ci),
3363 vm_ci_kwarg(ci));
3364 can_fastpath = false;
3365 }
3366 argv[param_size - 1] = (VALUE)ci;
3367 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3368 return 0;
3369 }
3370
3371 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3372}
3373
/* Rearrange the VM stack for a forwarded call `bar(...)`: copy the `argc`
 * forwarded arguments (and, if `splat` is a non-false Array, its elements)
 * from the caller's locals down over the CI that `getlocal ...` pushed,
 * leaving plain positional arguments on top of the stack. */
static void
vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
{
    // This case is when the caller is using a ... parameter.
    // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
    // In this case the caller's caller's CI will be on the stack.
    //
    // For example:
    //
    // def bar(a, b); a + b; end
    // def foo(...); bar(...); end
    // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
    //
    // Stack layout will be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > CI for foo(1, 2), via `getlocal ...`
    // > ( SP points here )
    const VALUE * lep = VM_CF_LEP(cfp);

    const rb_iseq_t *iseq;

    // If we're in an escaped environment (lambda for example), get the iseq
    // from the captured env.
    if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
        rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
        iseq = env->iseq;
    }
    else { // Otherwise use the lep to find the caller
        iseq = CFP_ISEQ(rb_vm_search_cf_from_ep(ec, cfp, lep));
    }

    // Our local storage is below the args we need to copy
    int local_size = ISEQ_BODY(iseq)->local_table_size + argc;

    const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
    VALUE * to = cfp->sp - 1; // clobber the CI

    if (RTEST(splat)) {
        to -= 1; // clobber the splat array
        CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
        MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
        to += RARRAY_LEN(splat);
    }

    CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
    MEMCPY(to, from, VALUE, argc);
    cfp->sp = to + argc;

    // Stack layout should now be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > 1
    // > 2
    // > ( SP points here )
}
3442
/* cc->call entry for a plain ISeq-defined method (no `...` forwardable
 * parameter): set up the arguments in place on the VM stack, then push the
 * callee's frame via vm_call_iseq_setup_2. */
static VALUE
vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup);

    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param_size = ISEQ_BODY(iseq)->param.size;
    int local_size = ISEQ_BODY(iseq)->local_table_size;

    RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);

    // argv starts at the first argument; opt_pc is the ISeq offset to start
    // at (non-zero when optional parameters were filled from defaults).
    const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
    return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}
3458
/* cc->call entry for an ISeq whose parameter list is forwardable (`...`).
 * The forwarded arguments live in the callee's locals, so both param and
 * local sizes are grown by the caller's argc before frame setup. */
static VALUE
vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup);

    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param_size = ISEQ_BODY(iseq)->param.size;
    int local_size = ISEQ_BODY(iseq)->local_table_size;

    RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);

    // Setting up local size and param size
    local_size = local_size + vm_ci_argc(calling->cd->ci);
    param_size = param_size + vm_ci_argc(calling->cd->ci);

    const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
    return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}
3478
3479static inline VALUE
3480vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3481 int opt_pc, int param_size, int local_size)
3482{
3483 const struct rb_callinfo *ci = calling->cd->ci;
3484 const struct rb_callcache *cc = calling->cc;
3485
3486 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3487 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3488 }
3489 else {
3490 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3491 }
3492}
3493
/* Push a METHOD frame for an ISeq call.  `argv` is the first argument on the
 * VM stack; the callee's sp starts just past its `param_size` parameters and
 * the frame reserves `local_size - param_size` extra local slots. */
static inline VALUE
vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
                          int opt_pc, int param_size, int local_size)
{
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *argv = cfp->sp - calling->argc;
    VALUE *sp = argv + param_size;
    cfp->sp = argv - 1 /* recv */;

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
                  calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  local_size - param_size,
                  ISEQ_BODY(iseq)->stack_max);
    return Qundef;
}
3510
/* Tailcall frame setup: pop the current frame and push the callee's frame in
 * its place so the stack does not grow.  The receiver and arguments are
 * copied down onto the freed stack space before the new frame is pushed. */
static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
{
    const struct rb_callcache *cc = calling->cc;
    unsigned int i;
    VALUE *argv = cfp->sp - calling->argc;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    // Preserve the FINISH flag of the frame being replaced so returning from
    // the tailcalled method still exits the VM loop correctly.
    VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    // If the block handler points into the frame we are about to pop, re-home
    // the captured block into the previous (surviving) control frame.
    if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
        struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
        const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
        dst_captured->code.val = src_captured->code.val;
        if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
            calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
        }
        else {
            calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
        }
    }

    vm_pop_frame(ec, cfp, cfp->ep);
    cfp = ec->cfp;

    sp_orig = sp = cfp->sp;

    /* push self */
    sp[0] = calling->recv;
    sp++;

    /* copy arguments */
    for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
        *sp++ = src_argv[i];
    }

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
                  calling->recv, calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
                  ISEQ_BODY(iseq)->stack_max);

    cfp->sp = sp_orig;

    return Qundef;
}
3559
3560static void
3561ractor_unsafe_check(void)
3562{
3563 if (!rb_ractor_main_p()) {
3564 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
3565 }
3566}
3567
3568static VALUE
3569call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3570{
3571 ractor_unsafe_check();
3572 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3573 return (*f)(recv, rb_ary_new4(argc, argv));
3574}
3575
3576static VALUE
3577call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3578{
3579 ractor_unsafe_check();
3580 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3581 return (*f)(argc, argv, recv);
3582}
3583
3584static VALUE
3585call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3586{
3587 ractor_unsafe_check();
3588 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3589 return (*f)(recv);
3590}
3591
3592static VALUE
3593call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3594{
3595 ractor_unsafe_check();
3596 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3597 return (*f)(recv, argv[0]);
3598}
3599
3600static VALUE
3601call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3602{
3603 ractor_unsafe_check();
3604 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3605 return (*f)(recv, argv[0], argv[1]);
3606}
3607
3608static VALUE
3609call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3610{
3611 ractor_unsafe_check();
3612 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3613 return (*f)(recv, argv[0], argv[1], argv[2]);
3614}
3615
3616static VALUE
3617call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3618{
3619 ractor_unsafe_check();
3620 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3621 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3622}
3623
3624static VALUE
3625call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3626{
3627 ractor_unsafe_check();
3628 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3629 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3630}
3631
3632static VALUE
3633call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3634{
3635 ractor_unsafe_check();
3637 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3638}
3639
3640static VALUE
3641call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3642{
3643 ractor_unsafe_check();
3645 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3646}
3647
3648static VALUE
3649call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3650{
3651 ractor_unsafe_check();
3653 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3654}
3655
3656static VALUE
3657call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3658{
3659 ractor_unsafe_check();
3661 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3662}
3663
3664static VALUE
3665call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3666{
3667 ractor_unsafe_check();
3669 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3670}
3671
3672static VALUE
3673call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3674{
3675 ractor_unsafe_check();
3677 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3678}
3679
3680static VALUE
3681call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3682{
3683 ractor_unsafe_check();
3685 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3686}
3687
3688static VALUE
3689call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3690{
3691 ractor_unsafe_check();
3693 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3694}
3695
3696static VALUE
3697call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3698{
3699 ractor_unsafe_check();
3701 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3702}
3703
3704static VALUE
3705call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3706{
3707 ractor_unsafe_check();
3709 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3710}
3711
3712static VALUE
3713ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3714{
3715 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3716 return (*f)(recv, rb_ary_new4(argc, argv));
3717}
3718
3719static VALUE
3720ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3721{
3722 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3723 return (*f)(argc, argv, recv);
3724}
3725
3726static VALUE
3727ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3728{
3729 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3730 return (*f)(recv);
3731}
3732
3733static VALUE
3734ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3735{
3736 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3737 return (*f)(recv, argv[0]);
3738}
3739
3740static VALUE
3741ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3742{
3743 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3744 return (*f)(recv, argv[0], argv[1]);
3745}
3746
3747static VALUE
3748ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3749{
3750 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3751 return (*f)(recv, argv[0], argv[1], argv[2]);
3752}
3753
3754static VALUE
3755ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3756{
3757 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3758 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3759}
3760
3761static VALUE
3762ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3763{
3764 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3765 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3766}
3767
3768static VALUE
3769ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3770{
3772 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3773}
3774
3775static VALUE
3776ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3777{
3779 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3780}
3781
3782static VALUE
3783ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3784{
3786 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3787}
3788
3789static VALUE
3790ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3791{
3793 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3794}
3795
3796static VALUE
3797ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3798{
3800 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3801}
3802
3803static VALUE
3804ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3805{
3807 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3808}
3809
3810static VALUE
3811ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3812{
3814 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3815}
3816
3817static VALUE
3818ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3819{
3821 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3822}
3823
3824static VALUE
3825ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3826{
3828 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3829}
3830
3831static VALUE
3832ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3833{
3835 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3836}
3837
3838static inline int
3839vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3840{
3841 const int ov_flags = RAISED_STACKOVERFLOW;
3842 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3843 if (rb_ec_raised_p(ec, ov_flags)) {
3844 rb_ec_raised_reset(ec, ov_flags);
3845 return TRUE;
3846 }
3847 return FALSE;
3848}
3849
/* Abort with rb_bug if a cfunc left the control-frame stack unbalanced;
 * `func` is the name reported in the bug message. */
#define CHECK_CFP_CONSISTENCY(func) \
    (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
     rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3853
/* Return the cfunc body of a callable method entry.  When
 * VM_DEBUG_VERIFY_METHOD_CACHE is set, first verify that the entry really is
 * a CFUNC (or NOTIMPLEMENTED) and rb_bug on any other method type. */
static inline
const rb_method_cfunc_t *
vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
{
#if VM_DEBUG_VERIFY_METHOD_CACHE
    switch (me->def->type) {
      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
        break;
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
        METHOD_BUG(ISEQ);
        METHOD_BUG(ATTRSET);
        METHOD_BUG(IVAR);
        METHOD_BUG(BMETHOD);
        METHOD_BUG(ZSUPER);
        METHOD_BUG(UNDEF);
        METHOD_BUG(OPTIMIZED);
        METHOD_BUG(MISSING);
        METHOD_BUG(REFINED);
        METHOD_BUG(ALIAS);
# undef METHOD_BUG
      default:
        rb_bug("wrong method type: %d", me->def->type);
    }
#endif
    return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
}
3881
/* Core cfunc invocation: push a CFUNC frame, fire c-call hooks, check arity,
 * invoke the C function, verify frame consistency, pop the frame and fire
 * c-return hooks.  `argv` points at the arguments; `stack_bottom` is where
 * the caller's sp is restored to (just below the receiver). */
static VALUE
vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                          int argc, VALUE *argv, VALUE *stack_bottom)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    VALUE val;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);

    VALUE recv = calling->recv;
    VALUE block_handler = calling->block_handler;
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;

    if (UNLIKELY(calling->kw_splat)) {
        frame_type |= VM_FRAME_FLAG_CFRAME_KW;
    }

    VM_ASSERT(reg_cfp == ec->cfp);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);

    vm_push_frame(ec, NULL, frame_type, recv,
                  block_handler, (VALUE)me,
                  0, ec->cfp->sp, 0, 0);

    // Fixed-arity cfuncs (argc >= 0) get their arity enforced here;
    // negative arities (-1/-2) accept any count.
    int len = cfunc->argc;
    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp = stack_bottom;
    val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);

    CHECK_CFP_CONSISTENCY("vm_call_cfunc");

    rb_vm_pop_frame(ec);

    VM_ASSERT(ec->cfp->sp == stack_bottom);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);

    return val;
}
3927
// Push a C method frame for a given cme. This is called when JIT code skipped
// pushing a frame but the C method reached a point where a frame is needed.
// `recv_idx` is the receiver's distance below sp (0 = just below sp).
void
rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
{
    VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
    rb_execution_context_t *ec = GET_EC();
    VALUE *sp = ec->cfp->sp;
    VALUE recv = *(sp - recv_idx - 1);
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
    VALUE block_handler = VM_BLOCK_HANDLER_NONE;
#if VM_CHECK_MODE > 0
    // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
    *(GET_EC()->cfp->sp) = Qfalse;
#endif
    vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
}
3945
3946// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3947bool
3948rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3949{
3950 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3951}
3952
3953static VALUE
3954vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3955{
3956 int argc = calling->argc;
3957 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3958 VALUE *argv = &stack_bottom[1];
3959
3960 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3961}
3962
/* Slow-path cfunc call: run full caller-side argument setup (splat/kwsplat
 * expansion), possibly spilling the arguments to a hidden heap Array, then
 * dispatch to vm_call_cfunc_with_frame_. */
static VALUE
vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    RB_DEBUG_COUNTER_INC(ccf_cfunc_other);

    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    VALUE argv_ary;
    // NOTE: assignment inside the condition — non-zero heap_argv means the
    // arguments were collected into a hidden Array.
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        VM_ASSERT(!IS_ARGS_KEYWORD(ci));
        int argc = RARRAY_LENINT(argv_ary);
        VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
        VALUE *stack_bottom = reg_cfp->sp - 2;

        VM_ASSERT(calling->argc == 1);
        VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
        VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary

        return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
    }
    else {
        // Cache the fastpath only when no splat/kwarg handling is needed.
        CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));

        return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
    }
}
3989
/* Expand a splatted argument Array directly onto the VM stack (in place of
 * the Array itself) and call the cfunc.  Falls back to the generic path when
 * the expansion would exceed VM_ARGC_STACK_MAX.  `stack_offset` is how many
 * extra values sit above the Array; `argc_offset` trims trailing elements
 * (e.g. an empty keywords hash). */
static inline VALUE
vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
{
    VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
    int argc = RARRAY_LENINT(argv_ary) - argc_offset;

    if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
        return vm_call_cfunc_other(ec, reg_cfp, calling);
    }

    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    calling->kw_splat = 0;
    int i;
    // stack_bottom lands just below the receiver; the splat Array (and any
    // values above it) are overwritten by the expanded elements.
    VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
    VALUE *sp = stack_bottom;
    CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
    for(i = 0; i < argc; i++) {
        *++sp = argv[i];
    }
    reg_cfp->sp = sp+1;

    return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
}
4013
/* Fast path for `f(*a)` calling a cfunc.  If the trailing element is a
 * ruby2_keywords-flagged Hash: an empty one is dropped, a non-empty one
 * forces the generic path (it must become keyword arguments). */
static inline VALUE
vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
    VALUE argv_ary = reg_cfp->sp[-1];
    int argc = RARRAY_LENINT(argv_ary);
    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    VALUE last_hash;
    int argc_offset = 0;

    if (UNLIKELY(argc > 0 &&
                 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
                 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
        if (!RHASH_EMPTY_P(last_hash)) {
            return vm_call_cfunc_other(ec, reg_cfp, calling);
        }
        argc_offset++;
    }
    return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
}
4034
4035static inline VALUE
4036vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4037{
4038 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4039 VALUE keyword_hash = reg_cfp->sp[-1];
4040
4041 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4042 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4043 }
4044
4045 return vm_call_cfunc_other(ec, reg_cfp, calling);
4046}
4047
/* Top-level cc->call entry for cfuncs: pick and cache the most specific
 * handler for the call shape (splat-only, splat+kwsplat, or generic). */
static VALUE
vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    RB_DEBUG_COUNTER_INC(ccf_cfunc);

    if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
        if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
            // f(*a)
            CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
            return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
        }
        if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
            // f(*a, **kw)
            CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
            return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
        }
    }

    CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
    return vm_call_cfunc_other(ec, reg_cfp, calling);
}
4070
4071static VALUE
4072vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4073{
4074 const struct rb_callcache *cc = calling->cc;
4075 RB_DEBUG_COUNTER_INC(ccf_ivar);
4076 cfp->sp -= 1;
4077 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4078 return ivar;
4079}
4080
/* Optimized attribute writer: pop the value and receiver and set the ivar
 * directly.  Tries the cached-shape fast write first, then type-specific
 * paths, and finally the generic slow path. */
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    attr_index_t index;
    shape_id_t dest_shape_id;
    vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
    ID id = vm_cc_cme(cc)->def->body.attr.id;
    rb_check_frozen(obj);
    VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
    // Qundef from vm_setivar means the fast path didn't apply; retry with a
    // type-specific setter before falling through to the slow path.
    if (UNDEF_P(res)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
            break;
          case T_CLASS:
          case T_MODULE:
            {
                res = vm_setivar_class(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
            break;
          default:
            {
                res = vm_setivar_default(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
        }
        res = vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    return res;
}
4118
4119static VALUE
4120vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4121{
4122 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4123}
4124
/* Invoke a bmethod (method defined with define_method) by calling its
 * backing Proc.  Raises if the Proc is unshareable and was defined in a
 * different Ractor than the current one. */
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
    rb_proc_t *proc;
    VALUE val;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    /* control block frame */
    GetProcPtr(procv, proc);
    val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));

    return val;
}
4145
4146static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4147
/* Fast bmethod call for Procs backed by an ISeq block: set up arguments and
 * push the block's frame directly (BLOCK|BMETHOD|LAMBDA) instead of going
 * through the generic Proc invocation. */
static VALUE
vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);

    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    const struct rb_block *block = &proc->block;

    // Unwrap nested Proc wrappers until we reach the underlying ISeq block.
    while (vm_block_type(block) == block_type_proc) {
        block = vm_proc_block(block->as.proc);
    }
    VM_ASSERT(vm_block_type(block) == block_type_iseq);

    const struct rb_captured_block *captured = &block->as.captured;
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    VALUE * const argv = cfp->sp - calling->argc;
    const int arg_size = ISEQ_BODY(iseq)->param.size;

    int opt_pc;
    if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
        opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
    }
    else {
        opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
    }

    cfp->sp = argv - 1; // -1 for the receiver

    vm_push_frame(ec, iseq,
                  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
                  calling->recv,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)cme,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  argv + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size,
                  ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
4198
/* Call a bmethod whose underlying block is not an ISeq (e.g. a symbol proc or
 * an ifunc): gather the arguments into a C array and delegate to
 * vm_call_bmethod_body. */
static VALUE
vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);

    VALUE *argv;
    int argc;
    CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(calling->heap_argv)) {
        /* arguments already collected into a heap array; use it directly */
        argv = RARRAY_PTR(calling->heap_argv);
        cfp->sp -= 2;  /* NOTE(review): presumably pops receiver + argv array — confirm */
    }
    else {
        /* copy the on-stack arguments out, then pop args and receiver */
        argc = calling->argc;
        argv = ALLOCA_N(VALUE, argc);
        MEMCPY(argv, cfp->sp - argc, VALUE, argc);
        cfp->sp += - argc - 1;
    }

    return vm_call_bmethod_body(ec, calling, argv);
}
4220
4221static VALUE
4222vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4223{
4224 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4225
4226 const struct rb_callcache *cc = calling->cc;
4227 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4228 VALUE procv = cme->def->body.bmethod.proc;
4229 rb_proc_t *proc;
4230 GetProcPtr(procv, proc);
4231 const struct rb_block *block = &proc->block;
4232
4233 while (vm_block_type(block) == block_type_proc) {
4234 block = vm_proc_block(block->as.proc);
4235 }
4236 if (vm_block_type(block) == block_type_iseq) {
4237 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4238 return vm_call_iseq_bmethod(ec, cfp, calling);
4239 }
4240
4241 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4242 return vm_call_noniseq_bmethod(ec, cfp, calling);
4243}
4244
4245VALUE
4246rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4247{
4248 VALUE klass = current_class;
4249
4250 /* for prepended Module, then start from cover class */
4251 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4252 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4253 klass = RBASIC_CLASS(klass);
4254 }
4255
4256 while (RTEST(klass)) {
4257 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4258 if (owner == target_owner) {
4259 return klass;
4260 }
4261 klass = RCLASS_SUPER(klass);
4262 }
4263
4264 return current_class; /* maybe module function */
4265}
4266
4267static const rb_callable_method_entry_t *
4268aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4269{
4270 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4271 const rb_callable_method_entry_t *cme;
4272
4273 if (orig_me->defined_class == 0) {
4274 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4275 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4276 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4277
4278 if (me->def->reference_count == 1) {
4279 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4280 }
4281 else {
4283 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4284 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4285 }
4286 }
4287 else {
4288 cme = (const rb_callable_method_entry_t *)orig_me;
4289 }
4290
4291 VM_ASSERT(callable_method_entry_p(cme));
4292 return cme;
4293}
4294
4296rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4297{
4298 return aliased_callable_method_entry(me);
4299}
4300
/* Call an alias method: swap in an on-stack call cache pointing at the
 * resolved original method entry, then re-dispatch on its method type. */
static VALUE
vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    calling->cc = &VM_CC_ON_STACK(Qundef,
                                  vm_call_general,
                                  {{0}},
                                  aliased_callable_method_entry(vm_cc_cme(calling->cc)));

    return vm_call_method_each_type(ec, cfp, calling);
}
4311
4312static enum method_missing_reason
4313ci_missing_reason(const struct rb_callinfo *ci)
4314{
4315 enum method_missing_reason stat = MISSING_NOENTRY;
4316 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4317 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4318 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4319 return stat;
4320}
4321
4322static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4323
/* Core of Object#send / Symbol#to_proc-style dispatch: call the method named
 * by `symbol` on calling->recv.  If the symbol names no known ID, rewrite the
 * call into method_missing (inserting the symbol as the first argument),
 * taking care not to intern new symbols when the default method_missing would
 * raise anyway ([Feature #5112]).  Visibility is enforced here unless the
 * call is an fcall. */
static VALUE
vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
               struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
{
    ASSUME(calling->argc >= 0);

    enum method_missing_reason missing_reason = MISSING_NOENTRY;
    int argc = calling->argc;
    VALUE recv = calling->recv;
    VALUE klass = CLASS_OF(recv);
    ID mid = rb_check_id(&symbol);  /* NULL if the name is not an existing ID */
    flags |= VM_CALL_OPT_SEND;

    if (UNLIKELY(! mid)) {
        /* unknown method name: redirect to method_missing */
        mid = idMethodMissing;
        missing_reason = ci_missing_reason(ci);
        ec->method_missing_reason = missing_reason;

        VALUE argv_ary;
        if (UNLIKELY(argv_ary = calling->heap_argv)) {
            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                rb_ary_unshift(argv_ary, symbol);

                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);

                rb_exc_raise(exc);
            }
            rb_ary_unshift(argv_ary, rb_str_intern(symbol));
        }
        else {
            /* E.g. when argc == 2
             *
             *   |      |        |      | TOPN
             *   |      |    +------+
             *   |      |    +---> | arg1 |    0
             * +------+    |    +------+
             * | arg1 | -+ +-> | arg0 |    1
             * +------+    |    +------+
             * | arg0 | ---+   | sym  |    2
             * +------+        +------+
             * | recv |        | recv |    3
             * --+------+--------+------+------
             */
            int i = argc;
            CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
            INC_SP(1);
            MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
            argc = ++calling->argc;

            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                TOPN(i) = symbol;
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, argc, argv, priv);

                rb_exc_raise(exc);
            }
            else {
                TOPN(i) = rb_str_intern(symbol);
            }
        }
    }

    /* build a fresh on-stack call info/data for the resolved method id */
    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        /* forwarding call (...): preserve the caller's callinfo */
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }
    calling->cc = &VM_CC_ON_STACK(klass,
                                  vm_call_general,
                                  { .method_missing_reason = missing_reason },
                                  rb_callable_method_entry_with_refinements(klass, mid, NULL));

    if (flags & VM_CALL_FCALL) {
        /* fcall bypasses the visibility check below */
        return vm_call_method(ec, reg_cfp, calling);
    }

    const struct rb_callcache *cc = calling->cc;
    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, reg_cfp, calling);
          case METHOD_VISI_PRIVATE:
            vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
            break;
          case METHOD_VISI_PROTECTED:
            vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
            break;
          default:
            VM_UNREACHABLE(vm_call_method);
        }
        return vm_call_method_missing(ec, reg_cfp, calling);
    }

    return vm_call_method_nome(ec, reg_cfp, calling);
}
4439
/* Common tail of optimized send: pop the method-name symbol off the stack
 * (shifting the remaining arguments down by one slot) and dispatch via
 * vm_call_symbol. */
static VALUE
vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    int i;
    VALUE sym;

    i = calling->argc - 1;

    if (calling->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    sym = TOPN(i);
    /* E.g. when i == 2
     *
     *   |      |        |      | TOPN
     * +------+        |      |
     * | arg1 | ---+   |      |    0
     * +------+    |   +------+
     * | arg0 | -+ +-> | arg1 |    1
     * +------+  |     +------+
     * | sym  |  +---> | arg0 |    2
     * +------+        +------+
     * | recv |        | recv |    3
     * --+------+--------+------+------
     */
    /* shift arguments */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    calling->argc -= 1;
    DEC_SP(1);

    return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
}
4476
/* Optimized send for "complex" call shapes (forwarding, splats, kwargs):
 * when the arguments were collected into a heap array, shift the method name
 * out of the array and re-splat; otherwise fall back to the simple path with
 * adjusted flags. */
static VALUE
vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = VM_CALL_FCALL;
    VALUE sym;

    VALUE argv_ary;
    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        sym = rb_ary_shift(argv_ary);
        flags |= VM_CALL_ARGS_SPLAT;
        if (calling->kw_splat) {
            /* mark the trailing hash so the callee treats it as keywords */
            VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
            ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
            calling->kw_splat = 0;
        }
        return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
    }

    if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
    return vm_call_opt_send0(ec, reg_cfp, calling, flags);
}
4501
4502static VALUE
4503vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4504{
4505 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4506 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4507}
4508
/* Entry point for optimized Object#send: pick the complex path for
 * forwarding calls or calls whose splat/kwarg layout could hide the method
 * name, and the simple path otherwise; install the choice as the fast path. */
static VALUE
vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send);

    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = vm_ci_flag(ci);

    /* complex iff: forwarding, or non-simple args where the splat/kw-splat/
     * kwargs could contain (or be) the method name argument */
    if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
        CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
        return vm_call_opt_send_complex(ec, reg_cfp, calling);
    }

    CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
    return vm_call_opt_send_simple(ec, reg_cfp, calling);
}
4528
/* Rewrite the current call m(a, b, c) into method_missing(:m, a, b, c):
 * push the method-name symbol in front of the on-stack arguments, record the
 * reason on the execution context, and re-dispatch to method_missing. */
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                            const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
{
    RB_DEBUG_COUNTER_INC(ccf_method_missing);

    VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
    unsigned int argc, flag;

    flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
    argc = ++calling->argc;  /* one extra slot for the symbol */

    /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    vm_check_canary(ec, reg_cfp->sp);
    if (argc > 1) {
        MEMMOVE(argv+1, argv, VALUE, argc-1);
    }
    argv[0] = ID2SYM(vm_ci_mid(orig_ci));
    INC_SP(1);

    ec->method_missing_reason = reason;

    /* fresh on-stack call info targeting idMethodMissing */
    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(flag & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        /* forwarding call (...): preserve the caller's callinfo */
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
    return vm_call_method(ec, reg_cfp, calling);
}
4574
4575static VALUE
4576vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4577{
4578 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4579}
4580
4581static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
/* Handle a zsuper (argument-forwarding super) method entry: look the method
 * up in the superclass of `klass`, unwrap a refined entry if needed, and
 * re-dispatch with an on-stack call cache. */
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
{
    klass = RCLASS_SUPER(klass);

    const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
    if (cme == NULL) {
        return vm_call_method_nome(ec, cfp, calling);
    }
    if (cme->def->type == VM_METHOD_TYPE_REFINED &&
        cme->def->body.refined.orig_me) {
        cme = refined_method_callable_without_refinement(cme);
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

    return vm_call_method_each_type(ec, cfp, calling);
}
4600
4601static inline VALUE
4602find_refinement(VALUE refinements, VALUE klass)
4603{
4604 if (NIL_P(refinements)) {
4605 return Qnil;
4606 }
4607 return rb_hash_lookup(refinements, klass);
4608}
4609
PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
/* Find the control frame of the method enclosing `cfp`: if cfp is a block
 * frame, walk up the stack to the frame running the block's local (method)
 * iseq.  Returns cfp itself when it is not a block, or the original frame
 * when the enclosing method frame cannot be found (orphan block). */
static rb_control_frame_t *
current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (CFP_ISEQ(cfp) && ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_BLOCK) {
        const rb_iseq_t *local_iseq = ISEQ_BODY(CFP_ISEQ(cfp))->local_iseq;

        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
                /* TODO: orphan block */
                return top_cfp;
            }
        } while (CFP_ISEQ(cfp) != local_iseq);
    }
    return cfp;
}
4629
4630static const rb_callable_method_entry_t *
4631refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4632{
4633 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4634 const rb_callable_method_entry_t *cme;
4635
4636 if (orig_me->defined_class == 0) {
4637 cme = NULL;
4639 }
4640 else {
4641 cme = (const rb_callable_method_entry_t *)orig_me;
4642 }
4643
4644 VM_ASSERT(callable_method_entry_p(cme));
4645
4646 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4647 cme = NULL;
4648 }
4649
4650 return cme;
4651}
4652
/* Resolve the method entry to invoke for a call on a refined method: walk the
 * lexical cref chain looking for an active refinement of the method's owner,
 * preferring a refinement's own definition.  Falls back to the refined
 * (original) method, or to the superclass lookup, when no refinement applies.
 * Returns NULL if nothing is callable. */
static const rb_callable_method_entry_t *
search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    ID mid = vm_ci_mid(calling->cd->ci);
    const rb_cref_t *cref = vm_get_cref(cfp->ep);
    const struct rb_callcache * const cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

    for (; cref; cref = CREF_NEXT(cref)) {
        const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
        if (NIL_P(refinement)) continue;

        const rb_callable_method_entry_t *const ref_me =
            rb_callable_method_entry(refinement, mid);

        if (ref_me) {
            if (vm_cc_call(cc) == vm_call_super_method) {
                /* super into a refinement must not re-enter the same method:
                 * skip a refinement whose definition equals the caller's */
                const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                    continue;
                }
            }

            if (cme->def->type != VM_METHOD_TYPE_REFINED ||
                cme->def != ref_me->def) {
                cme = ref_me;
            }
            if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
                return cme;
            }
        }
        else {
            return NULL;
        }
    }

    if (vm_cc_cme(cc)->def->body.refined.orig_me) {
        return refined_method_callable_without_refinement(vm_cc_cme(cc));
    }
    else {
        /* no original method recorded: continue lookup in the superclass */
        VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
        const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
        return cme;
    }
}
4699
/* Call a refined method: resolve the applicable entry via
 * search_refined_method, cache it (in the iseq's call data when one exists,
 * otherwise on the stack), and re-dispatch; method_missing when unresolved. */
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);

    if (ref_cme) {
        if (calling->cd->cc) {
            /* writable call data: install a heap call cache and notify GC */
            const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
            RB_OBJ_WRITE(CFP_ISEQ(cfp), &calling->cd->cc, cc);
            return vm_call_method(ec, cfp, calling);
        }
        else {
            struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
            calling->cc= ref_cc;
            return vm_call_method(ec, cfp, calling);
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
4721
4722static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4723
NOINLINE(static VALUE
         vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                                  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));

/* Invoke a block for Proc#call-style optimized calls: drop the receiver
 * (the Proc) from the stack and yield to the block handler. */
static VALUE
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                         struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
{
    int argc = calling->argc;

    /* remove self */
    if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
    DEC_SP(1);

    return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
}
4740
4741static VALUE
4742vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4743{
4744 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4745
4746 const struct rb_callinfo *ci = calling->cd->ci;
4747 VALUE procval = calling->recv;
4748 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4749}
4750
/* Optimized call of the method-local block (e.g. `yield`-like block.call):
 * when Proc#call is unredefined, yield the frame's block handler directly;
 * otherwise materialize the Proc and go through full dispatch. */
static VALUE
vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_block_call);

    VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
    const struct rb_callinfo *ci = calling->cd->ci;

    if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
        return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
    }
    else {
        calling->recv = rb_vm_bh_to_procval(ec, block_handler);
        calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
        return vm_call_general(ec, reg_cfp, calling);
    }
}
4768
4769static VALUE
4770vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4771{
4772 VALUE recv = calling->recv;
4773
4774 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4775 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4776 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4777
4778 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4779 return RSTRUCT_GET_RAW(recv, off);
4780}
4781
4782static VALUE
4783vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4784{
4785 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4786
4787 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4788 reg_cfp->sp -= 1;
4789 return ret;
4790}
4791
4792static VALUE
4793vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4794{
4795 VALUE recv = calling->recv;
4796
4797 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4798 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4799 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4800
4801 rb_check_frozen(recv);
4802
4803 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4804 RSTRUCT_SET_RAW(recv, off, val);
4805
4806 return val;
4807}
4808
4809static VALUE
4810vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4811{
4812 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4813
4814 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4815 reg_cfp->sp -= 2;
4816 return ret;
4817}
4818
4819NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4820 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4821
/* Evaluate an attr reader/writer call `func` into `var`.  When any C-level
 * events are enabled, wrap the call with c-call/c-return event hooks and skip
 * `nohook`; otherwise run `nohook` (typically call-cache fast-path setup)
 * before the call. */
#define VM_CALL_METHOD_ATTR(var, func, nohook) \
    if (UNLIKELY(ruby_vm_c_events_enabled > 0)) { \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
        var = func; \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
    } \
    else { \
        nohook; \
        var = func; \
    }
4834
/* Dispatch a VM_METHOD_TYPE_OPTIMIZED method by its optimized sub-type
 * (send / Proc#call / block.call / Struct reader / Struct writer), installing
 * the matching handler as the call-cache fast path. */
static VALUE
vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                  const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    switch (vm_cc_cme(cc)->def->body.optimized.type) {
      case OPTIMIZED_METHOD_TYPE_SEND:
        CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
        return vm_call_opt_send(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
        return vm_call_opt_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
        return vm_call_opt_block_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aref(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
        CALLER_SETUP_ARG(cfp, calling, ci, 1);
        rb_check_arity(calling->argc, 1, 1);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aset(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      default:
        rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
    }
}
4875
/* Dispatch a call by the method entry's definition type (ISeq, cfunc,
 * attr reader/writer, bmethod, alias, optimized, zsuper, refined, ...),
 * installing a type-specific fast path in the call cache where safe. */
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE v;

    VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));

    switch (cme->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
            CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
            return vm_call_iseq_fwd_setup(ec, cfp, calling);
        }
        else {
            CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
            return vm_call_iseq_setup(ec, cfp, calling);
        }

      case VM_METHOD_TYPE_NOTIMPLEMENTED:
      case VM_METHOD_TYPE_CFUNC:
        CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
        return vm_call_cfunc(ec, cfp, calling);

      case VM_METHOD_TYPE_ATTRSET:
        CALLER_SETUP_ARG(cfp, calling, ci, 1);

        rb_check_arity(calling->argc, 1, 1);

        /* call shapes under which the attrset fast path must not be cached */
        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);

        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        else {
            /* unmarkable cache: clone it on the stack so the attr index can
             * be stored without GC-visible writes */
            cc = &((struct rb_callcache) {
                .flags = T_IMEMO |
                         (imemo_callcache << FL_USHIFT) |
                         VM_CALLCACHE_UNMARKABLE |
                         VM_CALLCACHE_ON_STACK,
                .klass = cc->klass,
                .cme_ = cc->cme_,
                .call_ = cc->call_,
                .aux_ = {
                    .attr = {
                        .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
                    }
                },
            });

            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        return v;

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
        const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
        VM_CALL_METHOD_ATTR(v,
                            vm_call_ivar(ec, cfp, calling),
                            CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
        return v;

      case VM_METHOD_TYPE_MISSING:
        vm_cc_method_missing_reason_set(cc, 0);
        CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
        return vm_call_method_missing(ec, cfp, calling);

      case VM_METHOD_TYPE_BMETHOD:
        CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
        return vm_call_bmethod(ec, cfp, calling);

      case VM_METHOD_TYPE_ALIAS:
        CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
        return vm_call_alias(ec, cfp, calling);

      case VM_METHOD_TYPE_OPTIMIZED:
        return vm_call_optimized(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_UNDEF:
        break;

      case VM_METHOD_TYPE_ZSUPER:
        return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));

      case VM_METHOD_TYPE_REFINED:
        // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
        // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
        return vm_call_refined(ec, cfp, calling);
    }

    rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
}
4977
4978NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4979
/* Handle a call with no method entry: dispatch to method_missing, or raise
 * directly if the missing method IS method_missing (to avoid infinite
 * recursion).  The raising branches never return. */
static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    /* method missing */
    const struct rb_callinfo *ci = calling->cd->ci;
    const int stat = ci_missing_reason(ci);

    if (vm_ci_mid(ci) == idMethodMissing) {
        if (UNLIKELY(calling->heap_argv)) {
            vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
        }
        else {
            /* STACK_ADDR_FROM_TOP expects a local named reg_cfp */
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
            vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
        }
    }
    else {
        return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
    }
}
5001
5002/* Protected method calls and super invocations need to check that the receiver
5003 * (self for super) inherits the module on which the method is defined.
5004 * In the case of refinements, it should consider the original class not the
5005 * refinement.
5006 */
5007static VALUE
5008vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
5009{
5010 VALUE defined_class = me->defined_class;
5011 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5012 return NIL_P(refined_class) ? defined_class : refined_class;
5013}
5014
/* Full method dispatch with visibility enforcement: public goes straight
 * through; private requires an fcall; protected requires the caller's self to
 * inherit the defining class.  Violations route to method_missing. */
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PRIVATE:
            if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
                enum method_missing_reason stat = MISSING_PRIVATE;
                if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;

                vm_cc_method_missing_reason_set(cc, stat);
                CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
                return vm_call_method_missing(ec, cfp, calling);
            }
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PROTECTED:
            if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
                VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
                    return vm_call_method_missing(ec, cfp, calling);
                }
                else {
                    /* caching method info to dummy cc */
                    VM_ASSERT(vm_cc_cme(cc) != NULL);
                    struct rb_callcache cc_on_stack = *cc;
                    FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
                    calling->cc = &cc_on_stack;
                    return vm_call_method_each_type(ec, cfp, calling);
                }
            }
            return vm_call_method_each_type(ec, cfp, calling);

          default:
            rb_bug("unreachable");
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
5065
/* Generic call handler: full dispatch with no specialized fast path. */
static VALUE
vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_general);
    return vm_call_method(ec, reg_cfp, calling);
}
5072
/* Reset a call cache's handler back to the generic slow path.
 * The cast deliberately strips const to write the otherwise-immutable
 * call_ slot in place. */
void
rb_vm_cc_general(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());

    *(vm_call_handler *)&cc->call_ = vm_call_general;
}
5081
/* Call handler used for invokesuper; identical in behavior to
 * vm_call_general, but its distinct function address is compared against in
 * search_refined_method. */
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_super_method);

    // This line is introduced to make different from `vm_call_general` because some compilers (VC we found)
    // can merge the function and the address of the function becomes same.
    // The address of `vm_call_super_method` is used in `search_refined_method`, so it should be different.
    if (ec == NULL) rb_bug("unreachable");

    /* this check is required to distinguish with other functions. */
    VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
    return vm_call_method(ec, reg_cfp, calling);
}
5096
5097/* super */
5098
5099static inline VALUE
5100vm_search_normal_superclass(VALUE klass)
5101{
5102 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5103 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5104 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5105 klass = RBASIC(klass)->klass;
5106 }
5107 klass = RCLASS_ORIGIN(klass);
5108 return RCLASS_SUPER(klass);
5109}
5110
NORETURN(static void vm_super_outside(void));

/* Raise NoMethodError for `super` used where no method frame is available. */
static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
5118
/* Shared sentinel call cache used when `super` finds no method entry;
 * it is a static object and must never be marked by the GC. */
static const struct rb_callcache *
empty_cc_for_super(void)
{
    return &vm_empty_cc_for_super;
}
5124
/* Resolve the callee for super/zsuper in the frame reg_cfp and fill in
 * cd's inline cache. Raises when super is invalid in this context:
 * outside a method, receiver of the wrong type, or implicit-argument
 * super from a define_method body. */
static const struct rb_callcache *
vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
{
    VALUE current_defined_class;
    const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);

    if (!me) {
        /* no method entry on this frame: super outside of a method */
        vm_super_outside();
    }

    current_defined_class = vm_defined_class_for_protected_call(me);

    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        iseq != method_entry_iseqptr(me) &&
        !rb_obj_is_kind_of(recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RCLASS_INCLUDER(current_defined_class) : current_defined_class;

        if (m) { /* not bound UnboundMethod */
            rb_raise(rb_eTypeError,
                     "self has wrong type to call super in this context: "
                     "%"PRIsVALUE" (expected %"PRIsVALUE")",
                     rb_obj_class(recv), m);
        }
    }

    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }

    /* super searches for the original (pre-alias) method id */
    ID mid = me->def->original_id;

    if (!vm_ci_markable(cd->ci)) {
        /* on-stack (unmarkable) CI: safe to rewrite the mid field in place */
        VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
    }
    else {
        // update iseq. really? (TODO)
        cd->ci = vm_ci_new_runtime(mid,
                                   vm_ci_flag(cd->ci),
                                   vm_ci_argc(cd->ci),
                                   vm_ci_kwarg(cd->ci));

        RB_OBJ_WRITTEN(iseq, Qundef, cd->ci);
    }

    const struct rb_callcache *cc;

    VALUE klass = vm_search_normal_superclass(me->defined_class);

    if (!klass) {
        /* bound instance method of module */
        cc = vm_cc_new(Qundef, NULL, vm_call_method_missing, cc_type_super);
        RB_OBJ_WRITE(iseq, &cd->cc, cc);
    }
    else {
        cc = vm_search_method_fastpath(reg_cfp, cd, klass);
        const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);

        // define_method can cache for different method id
        if (cached_cme == NULL) {
            // empty_cc_for_super is not markable object
            cd->cc = empty_cc_for_super();
        }
        else if (cached_cme->called_id != mid) {
            const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
            if (cme) {
                cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
                RB_OBJ_WRITE(iseq, &cd->cc, cc);
            }
            else {
                cd->cc = cc = empty_cc_for_super();
            }
        }
        else {
            switch (cached_cme->def->type) {
              // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
              case VM_METHOD_TYPE_REFINED:
              // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
              case VM_METHOD_TYPE_ATTRSET:
              case VM_METHOD_TYPE_IVAR:
                vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
                break;
              default:
                break; // use fastpath
            }
        }
    }

    VM_ASSERT((vm_cc_cme(cc), true));

    return cc;
}
5221
5222/* yield */
5223
5224static inline int
5225block_proc_is_lambda(const VALUE procval)
5226{
5227 rb_proc_t *proc;
5228
5229 if (procval) {
5230 GetProcPtr(procval, proc);
5231 return proc->is_lambda;
5232 }
5233 else {
5234 return 0;
5235 }
5236}
5237
5238static VALUE
5239vm_yield_with_cfunc(rb_execution_context_t *ec,
5240 const struct rb_captured_block *captured,
5241 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5243{
5244 int is_lambda = FALSE; /* TODO */
5245 VALUE val, arg, blockarg;
5246 int frame_flag;
5247 const struct vm_ifunc *ifunc = captured->code.ifunc;
5248
5249 if (is_lambda) {
5250 arg = rb_ary_new4(argc, argv);
5251 }
5252 else if (argc == 0) {
5253 arg = Qnil;
5254 }
5255 else {
5256 arg = argv[0];
5257 }
5258
5259 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5260
5261 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5262 if (kw_splat) {
5263 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5264 }
5265
5266 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5267 frame_flag,
5268 self,
5269 VM_GUARDED_PREV_EP(captured->ep),
5270 (VALUE)me,
5271 0, ec->cfp->sp, 0, 0);
5272 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5273 rb_vm_pop_frame(ec);
5274
5275 return val;
5276}
5277
/* Public wrapper: yield to an ifunc block with no kw_splat, no block
 * argument, and no bmethod method entry. */
VALUE
rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
{
    return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
}
5283
/* Yield to a Symbol block handler (e.g. `&:sym`) by calling the
 * symbol's proc semantics with the given arguments. */
static VALUE
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
{
    return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
}
5289
5290static inline int
5291vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5292{
5293 int i;
5294 long len = RARRAY_LEN(ary);
5295
5296 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5297
5298 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5299 argv[i] = RARRAY_AREF(ary, i);
5300 }
5301
5302 return i;
5303}
5304
/* Try to convert the block's single argument to an Array (via to_ary)
 * for implicit splatting; returns the array, or nil when the value is
 * not convertible. */
static inline VALUE
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
{
    VALUE ary, arg0 = argv[0];
    ary = rb_check_array_type(arg0);
#if 0
    argv[0] = arg0;
#else
    /* the conversion must not have replaced the value on the VM stack */
    VM_ASSERT(argv[0] == arg0);
#endif
    return ary;
}
5317
/* Set up block arguments on the VM stack for invoking `iseq`. For
 * simple parameter lists this applies block-specific rules in place:
 * implicit arg0 array splat, padding missing arguments with nil, and
 * truncating extras. Method-style setup (lambdas) raises on arity
 * mismatch instead. Complex parameter lists delegate to
 * setup_parameters_complex. Returns the opt_pc to start execution at. */
static int
vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
    if (rb_simple_iseq_p(iseq)) {
        rb_control_frame_t *cfp = ec->cfp;
        VALUE arg0;

        CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);

        /* single array argument to a multi-parameter block gets splatted
         * (unless the block was written with an ambiguous single param) */
        if (arg_setup_type == arg_setup_block &&
            calling->argc == 1 &&
            ISEQ_BODY(iseq)->param.flags.has_lead &&
            !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
            !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
            calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
        }

        if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
            if (arg_setup_type == arg_setup_block) {
                if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
                    int i;
                    CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
                    for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
                }
                else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
                }
            }
            else {
                argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
            }
        }

        return 0;
    }
    else {
        return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
    }
}
5358
/* Build a temporary calling-info and an on-stack call-info, then run
 * block argument setup for yielding `argc` args into `iseq`.
 * Returns the opt_pc from vm_callee_setup_block_arg. */
static int
vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
    struct rb_calling_info calling_entry, *calling;

    calling = &calling_entry;
    calling->argc = argc;
    calling->block_handler = block_handler;
    calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
    calling->recv = Qundef;
    calling->heap_argv = 0;
    calling->cc = NULL;
    /* mid 0, no kwargs: only argc/flags matter for block setup */
    struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);

    return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
}
5375
5376/* ruby iseq -> ruby block */
5377
/* Invoke an iseq-defined block: arrange the arguments in place on the
 * VM stack and push a BLOCK frame. Returns Qundef, signalling the
 * caller to resume the interpreter in the new frame. */
static VALUE
vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    const int arg_size = ISEQ_BODY(iseq)->param.size;
    /* the arguments already sit on the stack; they become the frame's locals */
    VALUE * const rsp = GET_SP() - calling->argc;
    VALUE * const argv = rsp;
    /* lambdas use strict method-style argument matching */
    int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
    int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);

    SET_SP(rsp);

    vm_push_frame(ec, iseq,
                  frame_flag,
                  captured->self,
                  VM_GUARDED_PREV_EP(captured->ep), 0,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  rsp + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
5403
/* Invoke a Symbol block handler: the first argument becomes the
 * receiver and the symbol is sent to it as a method call. */
static VALUE
vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                       struct rb_calling_info *calling, const struct rb_callinfo *ci,
                       MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
    int flags = vm_ci_flag(ci);

    /* Complex argument forms (splat/kwsplat/kwargs) where the receiver
     * may not already be a plain stack value need full argument setup
     * before the receiver can be peeled off. */
    if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 0) ||
                  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
        CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
        flags = 0;
        if (UNLIKELY(calling->heap_argv)) {
#if VM_ARGC_STACK_MAX < 0
            if (RARRAY_LEN(calling->heap_argv) < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
#endif
            /* first splatted element is the receiver */
            calling->recv = rb_ary_shift(calling->heap_argv);
            // Modify stack to avoid cfp consistency error
            reg_cfp->sp++;
            reg_cfp->sp[-1] = reg_cfp->sp[-2];
            reg_cfp->sp[-2] = calling->recv;
            flags |= VM_CALL_ARGS_SPLAT;
        }
        else {
            if (calling->argc < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
            calling->recv = TOPN(--calling->argc);
        }
        if (calling->kw_splat) {
            flags |= VM_CALL_KW_SPLAT;
        }
    }
    else {
        if (calling->argc < 1) {
            rb_raise(rb_eArgError, "no receiver given");
        }
        /* pop the receiver off the argument list */
        calling->recv = TOPN(--calling->argc);
    }

    return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
}
5451
/* Invoke an ifunc (C function) block with arguments taken from the VM
 * stack, or from heap_argv when argument setup spilled them to the heap. */
static VALUE
vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                      struct rb_calling_info *calling, const struct rb_callinfo *ci,
                      MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE val;
    int argc;
    const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
    CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    argc = calling->argc;
    val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
    POPN(argc); /* TODO: should put before C/yield? */
    return val;
}
5466
5467static VALUE
5468vm_proc_to_block_handler(VALUE procval)
5469{
5470 const struct rb_block *block = vm_proc_block(procval);
5471
5472 switch (vm_block_type(block)) {
5473 case block_type_iseq:
5474 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5475 case block_type_ifunc:
5476 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5477 case block_type_symbol:
5478 return VM_BH_FROM_SYMBOL(block->as.symbol);
5479 case block_type_proc:
5480 return VM_BH_FROM_PROC(block->as.proc);
5481 }
5482 VM_UNREACHABLE(vm_yield_with_proc);
5483 return Qundef;
5484}
5485
5486static VALUE
5487vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5488 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5489 bool is_lambda, VALUE block_handler)
5490{
5491 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5492 VALUE proc = VM_BH_TO_PROC(block_handler);
5493 is_lambda = block_proc_is_lambda(proc);
5494 block_handler = vm_proc_to_block_handler(proc);
5495 }
5496
5497 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5498}
5499
5500static inline VALUE
5501vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5502 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5503 bool is_lambda, VALUE block_handler)
5504{
5505 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5506 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5507 bool is_lambda, VALUE block_handler);
5508
5509 switch (vm_block_handler_type(block_handler)) {
5510 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5511 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5512 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5513 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5514 default: rb_bug("vm_invoke_block: unreachable");
5515 }
5516
5517 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5518}
5519
5520static VALUE
5521vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5522{
5523 const rb_execution_context_t *ec = GET_EC();
5524 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5525 struct rb_captured_block *captured;
5526
5527 if (cfp == 0) {
5528 rb_bug("vm_make_proc_with_iseq: unreachable");
5529 }
5530
5531 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5532 captured->code.iseq = blockiseq;
5533
5534 return rb_vm_make_proc(ec, captured, rb_cProc);
5535}
5536
/* Run a `once` region body: wrap its iseq in a proc and call it with no
 * arguments and no block. */
static VALUE
vm_once_exec(VALUE iseq)
{
    VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
    return rb_proc_call_with_block(proc, 0, 0, Qnil);
}
5543
5544static VALUE
5545vm_once_clear(VALUE data)
5546{
5547 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5548 is->once.running_thread = NULL;
5549 return Qnil;
5550}
5551
5552/* defined insn */
5553
5554static bool
5555check_respond_to_missing(VALUE obj, VALUE v)
5556{
5557 VALUE args[2];
5558 VALUE r;
5559
5560 args[0] = obj; args[1] = Qfalse;
5561 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5562 if (!UNDEF_P(r) && RTEST(r)) {
5563 return true;
5564 }
5565 else {
5566 return false;
5567 }
5568}
5569
/* Implements the `defined?` expression for the kind encoded in op_type.
 * `obj` is typically the symbol/name being tested and `v` a context
 * value (receiver, cbase, etc. depending on the type). */
static bool
vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    VALUE klass;
    enum defined_type type = (enum defined_type)op_type;

    switch (type) {
      case DEFINED_IVAR:
        return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
        break;
      case DEFINED_GVAR:
        return rb_gvar_defined(SYM2ID(obj));
        break;
      case DEFINED_CVAR: {
        const rb_cref_t *cref = vm_get_cref(GET_EP());
        klass = vm_get_cvar_base(cref, GET_CFP(), 0);
        return rb_cvar_defined(klass, SYM2ID(obj));
        break;
      }
      case DEFINED_CONST:
      case DEFINED_CONST_FROM: {
        bool allow_nil = type == DEFINED_CONST;
        klass = v;
        return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
        break;
      }
      case DEFINED_FUNC:
        klass = CLASS_OF(v);
        return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
        break;
      case DEFINED_METHOD:{
        VALUE klass = CLASS_OF(v);
        const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);

        if (me) {
            switch (METHOD_ENTRY_VISI(me)) {
              case METHOD_VISI_PRIVATE:
                break;
              case METHOD_VISI_PROTECTED:
                if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
                    break;
                }
                /* fallthrough: protected is visible when self is kind of
                 * the defining class */
              case METHOD_VISI_PUBLIC:
                return true;
                break;
              default:
                rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
            }
        }
        else {
            /* no method entry: defer to respond_to_missing? */
            return check_respond_to_missing(obj, v);
        }
        break;
      }
      case DEFINED_YIELD:
        if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
            return true;
        }
        break;
      case DEFINED_ZSUPER:
        {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());

            if (me) {
                VALUE klass = vm_search_normal_superclass(me->defined_class);
                if (!klass) return false;

                ID id = me->def->original_id;

                return rb_method_boundp(klass, id, 0);
            }
        }
        break;
      case DEFINED_REF:
        return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
      default:
        rb_bug("unimplemented defined? type (VM)");
        break;
      }

    return false;
}
5652
/* Non-static wrapper around vm_defined for callers outside this file. */
bool
rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    return vm_defined(ec, reg_cfp, op_type, obj, v);
}
5658
5659static const VALUE *
5660vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5661{
5662 rb_num_t i;
5663 const VALUE *ep = reg_ep;
5664 for (i = 0; i < lv; i++) {
5665 ep = GET_PREV_EP(ep);
5666 }
5667 return ep;
5668}
5669
5670static VALUE
5671vm_get_special_object(const VALUE *const reg_ep,
5672 enum vm_special_object_type type)
5673{
5674 switch (type) {
5675 case VM_SPECIAL_OBJECT_VMCORE:
5676 return rb_mRubyVMFrozenCore;
5677 case VM_SPECIAL_OBJECT_CBASE:
5678 return vm_get_cbase(reg_ep);
5679 case VM_SPECIAL_OBJECT_CONST_BASE:
5680 return vm_get_const_base(reg_ep);
5681 default:
5682 rb_bug("putspecialobject insn: unknown value_type %d", type);
5683 }
5684}
5685
// ZJIT implementation uses this C function and needs a non-static
// symbol to call from generated code.
VALUE
rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
{
    return vm_get_special_object(reg_ep, type);
}
5693
5694static VALUE
5695vm_concat_array(VALUE ary1, VALUE ary2st)
5696{
5697 const VALUE ary2 = ary2st;
5698 VALUE tmp1 = rb_check_to_array(ary1);
5699 VALUE tmp2 = rb_check_to_array(ary2);
5700
5701 if (NIL_P(tmp1)) {
5702 tmp1 = rb_ary_new3(1, ary1);
5703 }
5704 if (tmp1 == ary1) {
5705 tmp1 = rb_ary_dup(ary1);
5706 }
5707
5708 if (NIL_P(tmp2)) {
5709 return rb_ary_push(tmp1, ary2);
5710 }
5711 else {
5712 return rb_ary_concat(tmp1, tmp2);
5713 }
5714}
5715
5716static VALUE
5717vm_concat_to_array(VALUE ary1, VALUE ary2st)
5718{
5719 /* ary1 must be a newly created array */
5720 const VALUE ary2 = ary2st;
5721
5722 if (NIL_P(ary2)) return ary1;
5723
5724 VALUE tmp2 = rb_check_to_array(ary2);
5725
5726 if (NIL_P(tmp2)) {
5727 return rb_ary_push(ary1, ary2);
5728 }
5729 else {
5730 return rb_ary_concat(ary1, tmp2);
5731 }
5732}
5733
// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_concat_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_array(ary1, ary2st);
}
5741
/* Non-static wrapper around vm_concat_to_array for JIT-generated code. */
VALUE
rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_to_array(ary1, ary2st);
}
5747
5748static VALUE
5749vm_splat_array(VALUE flag, VALUE ary)
5750{
5751 if (NIL_P(ary)) {
5752 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5753 }
5754 VALUE tmp = rb_check_to_array(ary);
5755 if (NIL_P(tmp)) {
5756 return rb_ary_new3(1, ary);
5757 }
5758 else if (RTEST(flag)) {
5759 return rb_ary_dup(tmp);
5760 }
5761 else {
5762 return tmp;
5763 }
5764}
5765
// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_splat_array(VALUE flag, VALUE ary)
{
    return vm_splat_array(flag, ary);
}
5773
5774static VALUE
5775vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5776{
5777 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5778
5779 if (flag & VM_CHECKMATCH_ARRAY) {
5780 long i;
5781 const long n = RARRAY_LEN(pattern);
5782
5783 for (i = 0; i < n; i++) {
5784 VALUE v = RARRAY_AREF(pattern, i);
5785 VALUE c = check_match(ec, v, target, type);
5786
5787 if (RTEST(c)) {
5788 return c;
5789 }
5790 }
5791 return Qfalse;
5792 }
5793 else {
5794 return check_match(ec, pattern, target, type);
5795 }
5796}
5797
/* Non-static wrapper around vm_check_match for callers outside this file. */
VALUE
rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
{
    return vm_check_match(ec, target, pattern, flag);
}
5803
5804static VALUE
5805vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5806{
5807 const VALUE kw_bits = *(ep - bits);
5808
5809 if (FIXNUM_P(kw_bits)) {
5810 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5811 if ((idx < VM_KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5812 return Qfalse;
5813 }
5814 else {
5815 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5816 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5817 }
5818 return Qtrue;
5819}
5820
5821static void
5822vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5823{
5824 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5825 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5826 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5827 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5828
5829 switch (flag) {
5830 case RUBY_EVENT_CALL:
5831 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5832 return;
5833 case RUBY_EVENT_C_CALL:
5834 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5835 return;
5836 case RUBY_EVENT_RETURN:
5837 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5838 return;
5840 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5841 return;
5842 }
5843 }
5844}
5845
5846static VALUE
5847vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5848{
5849 if (!rb_const_defined_at(cbase, id)) {
5850 return 0;
5851 }
5852 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5853 return rb_public_const_get_at(cbase, id);
5854 }
5855 else {
5856 return rb_const_get_at(cbase, id);
5857 }
5858}
5859
5860static VALUE
5861vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5862{
5863 if (!RB_TYPE_P(klass, T_CLASS)) {
5864 return 0;
5865 }
5866 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5867 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5868
5869 if (tmp != super) {
5870 rb_raise(rb_eTypeError,
5871 "superclass mismatch for class %"PRIsVALUE"",
5872 rb_id2str(id));
5873 }
5874 else {
5875 return klass;
5876 }
5877 }
5878 else {
5879 return klass;
5880 }
5881}
5882
5883static VALUE
5884vm_check_if_module(ID id, VALUE mod)
5885{
5886 if (!RB_TYPE_P(mod, T_MODULE)) {
5887 return 0;
5888 }
5889 else {
5890 return mod;
5891 }
5892}
5893
/* Bind class/module `c` to constant `id` under cbase, giving it a
 * permanent class path first. Returns c. */
static VALUE
declare_under(ID id, VALUE cbase, VALUE c)
{
    rb_set_class_path_string(c, cbase, rb_id2str(id));
    rb_const_set(cbase, id, c);
    return c;
}
5901
5902static VALUE
5903vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5904{
5905 /* new class declaration */
5906 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5907 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5908 rb_class_inherited(s, c);
5909 return c;
5910}
5911
/* Declare a brand-new module named `id` under cbase. */
static VALUE
vm_declare_module(ID id, VALUE cbase)
{
    /* new module declaration */
    return declare_under(id, cbase, rb_module_new());
}
5918
5919NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5920static void
5921unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5922{
5923 VALUE name = rb_id2str(id);
5924 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5925 name, type);
5926 VALUE location = rb_const_source_location_at(cbase, id);
5927 if (!NIL_P(location)) {
5928 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5929 " previous definition of %"PRIsVALUE" was here",
5930 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5931 }
5933}
5934
/* Implements `class Foo [< Super]`: returns an existing class constant
 * under cbase after validating it (raising on kind/superclass
 * mismatch), or declares a new class there. */
static VALUE
vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    VALUE klass;

    if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
        rb_raise(rb_eTypeError,
                 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
                 rb_obj_class(super));
    }

    vm_check_if_namespace(cbase);

    /* find klass */
    rb_autoload_load(cbase, id);

    if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
        /* the constant already exists: validate and reopen it */
        if (!vm_check_if_class(id, flags, super, klass))
            unmatched_redefinition("class", cbase, id, klass);
        return klass;
    }
    else {
        return vm_declare_class(id, flags, cbase, super);
    }
}
5960
5961static VALUE
5962vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5963{
5964 VALUE mod;
5965
5966 vm_check_if_namespace(cbase);
5967 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5968 if (!vm_check_if_module(id, mod))
5969 unmatched_redefinition("module", cbase, id, mod);
5970 return mod;
5971 }
5972 else {
5973 return vm_declare_module(id, cbase);
5974 }
5975}
5976
/* Dispatcher for the defineclass instruction: a regular class, a
 * singleton class (`class << obj`), or a module, per the type encoded
 * in flags. */
static VALUE
vm_find_or_create_class_by_id(ID id,
                              rb_num_t flags,
                              VALUE cbase,
                              VALUE super)
{
    rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);

    switch (type) {
      case VM_DEFINECLASS_TYPE_CLASS:
        /* classdef returns class scope value */
        return vm_define_class(id, flags, cbase, super);

      case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
        /* classdef returns class scope value */
        return rb_singleton_class(cbase);

      case VM_DEFINECLASS_TYPE_MODULE:
        /* classdef returns class scope value */
        return vm_define_module(id, flags, cbase);

      default:
        rb_bug("unknown defineclass type: %d", (int)type);
    }
}
6002
6003static rb_method_visibility_t
6004vm_scope_visibility_get(const rb_execution_context_t *ec)
6005{
6006 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6007
6008 if (!vm_env_cref_by_cref(cfp->ep)) {
6009 return METHOD_VISI_PUBLIC;
6010 }
6011 else {
6012 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6013 }
6014}
6015
6016static int
6017vm_scope_module_func_check(const rb_execution_context_t *ec)
6018{
6019 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6020
6021 if (!vm_env_cref_by_cref(cfp->ep)) {
6022 return FALSE;
6023 }
6024 else {
6025 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6026 }
6027}
6028
/* Implements definemethod/definesmethod: register the iseq as method
 * `id` on the cref's definition class (or on obj's singleton class when
 * is_singleton), honoring the scope's default visibility and the
 * module_function mode. */
static void
vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
{
    VALUE klass;
    rb_method_visibility_t visi;
    rb_cref_t *cref = vm_ec_cref(ec);

    if (is_singleton) {
        klass = rb_singleton_class(obj); /* class and frozen checked in this API */
        visi = METHOD_VISI_PUBLIC;
    }
    else {
        klass = CREF_CLASS_FOR_DEFINITION(cref);
        visi = vm_scope_visibility_get(ec);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
    // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
    if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) &&
        !RCLASS_SINGLETON_P(klass) &&
        (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
    }

    if (!is_singleton && vm_scope_module_func_check(ec)) {
        /* module_function: also define a public copy on the singleton class */
        klass = rb_singleton_class(klass);
        rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
    }
}
6062
6063// Return the untagged block handler:
6064// * If it's VM_BLOCK_HANDLER_NONE, return nil
6065// * If it's an ISEQ or an IFUNC, fetch it from its rb_captured_block
6066// * If it's a PROC or SYMBOL, return it as is
6067VALUE
6068rb_vm_untag_block_handler(VALUE block_handler)
6069{
6070 if (VM_BLOCK_HANDLER_NONE == block_handler) return Qnil;
6071
6072 switch (vm_block_handler_type(block_handler)) {
6073 case block_handler_type_iseq:
6074 case block_handler_type_ifunc: {
6075 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
6076 return captured->code.val;
6077 }
6078 case block_handler_type_proc:
6079 case block_handler_type_symbol:
6080 return block_handler;
6081 default:
6082 rb_bug("rb_vm_untag_block_handler: unreachable");
6083 }
6084}
6085
/* Untagged block handler of the given control frame (see above). */
VALUE
rb_vm_get_untagged_block_handler(rb_control_frame_t *reg_cfp)
{
    return rb_vm_untag_block_handler(VM_CF_BLOCK_HANDLER(reg_cfp));
}
6091
/* Body of the invokeblock instruction: yield to the current frame's
 * block handler, raising LocalJumpError when there is none. */
static VALUE
vm_invokeblock_i(struct rb_execution_context_struct *ec,
                 struct rb_control_frame_struct *reg_cfp,
                 struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());

    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        /* raises; this branch never returns */
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    else {
        return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
    }
}
6107
/* How vm_sendish locates its callee. */
enum method_explorer_type {
    mexp_search_method,      /* normal method dispatch via the inline cache */
    mexp_search_invokeblock, /* yield: invoke the frame's block handler */
    mexp_search_super,       /* super: search from the superclass */
};
6113
/* Common tail of the send-family instructions: build the calling info,
 * resolve the callee according to method_explorer, and invoke the call
 * handler. Returns Qundef when a new Ruby frame was pushed (the caller
 * must resume the interpreter loop), or the result value otherwise. */
static inline VALUE
vm_sendish(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *reg_cfp,
    struct rb_call_data *cd,
    VALUE block_handler,
    enum method_explorer_type method_explorer
) {
    VALUE val = Qundef;
    const struct rb_callinfo *ci = cd->ci;
    const struct rb_callcache *cc;
    int argc = vm_ci_argc(ci);
    /* receiver sits below the arguments on the stack */
    VALUE recv = TOPN(argc);
    struct rb_calling_info calling = {
        .block_handler = block_handler,
        .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
        .recv = recv,
        .argc = argc,
        .cd = cd,
    };

    switch (method_explorer) {
      case mexp_search_method:
        calling.cc = cc = vm_search_method_fastpath(reg_cfp, cd, CLASS_OF(recv));
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_super:
        calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_invokeblock:
        val = vm_invokeblock_i(ec, GET_CFP(), &calling);
        break;
    }
    return val;
}
6150
/* Non-static entry for the send instruction: set up the block argument,
 * dispatch, and run any pushed Ruby frame to completion via VM_EXEC. */
VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6160
// Fallback for YJIT/ZJIT, not used by the interpreter
VALUE
rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    /* forwarding calls (`...`) dispatch through an adjusted CI/CD pair */
    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);

    /* propagate a newly created (markable) call cache back into cd */
    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6181
/* Non-static entry for opt_send_without_block: dispatch with no block
 * and run any pushed Ruby frame to completion via VM_EXEC. */
VALUE
rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6191
/* Non-static entry for invokesuper: set up the block argument, dispatch
 * via super search, and run any pushed Ruby frame via VM_EXEC. */
VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);

    VM_EXEC(ec, val);
    return val;
}
6203
// Fallback for YJIT/ZJIT, not used by the interpreter
// `super(...)` with argument forwarding: mirrors rb_vm_sendforward but
// searches starting from the superclass.
VALUE
rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);

    // Write back a markable call cache created for the adjusted CD (see
    // rb_vm_sendforward for details).
    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6223
// Entry point for `invokeblock` (yield): no block handler of its own;
// dispatch goes through vm_invokeblock_i via mexp_search_invokeblock.
VALUE
rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
    VM_EXEC(ec, val);
    return val;
}
6233
6234/* object.c */
6235VALUE rb_nil_to_s(VALUE);
6236VALUE rb_true_to_s(VALUE);
6237VALUE rb_false_to_s(VALUE);
6238/* numeric.c */
6239VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6240VALUE rb_fix_to_s(VALUE);
6241/* variable.c */
6242VALUE rb_mod_to_s(VALUE);
6244
/* Fast path for the `objtostring` instruction (string interpolation):
 * returns the stringified receiver when #to_s is known to be the default
 * implementation for the receiver's type, or Qundef to fall back to a
 * regular method call. */
static VALUE
vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
{
    int type = TYPE(recv);
    if (type == T_STRING) {
        return recv;
    }

    const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);

    switch (type) {
      case T_SYMBOL:
        if (check_method_basic_definition(cme)) {
            // rb_sym_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            return rb_sym2str(recv);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (check_cfunc(cme, rb_mod_to_s)) {
            // rb_mod_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            VALUE val = rb_mod_name(recv);
            if (NIL_P(val)) {
                val = rb_mod_to_s(recv);
            }
            return val;
        }
        break;
      case T_NIL:
        if (check_cfunc(cme, rb_nil_to_s)) {
            return rb_nil_to_s(recv);
        }
        break;
      case T_TRUE:
        if (check_cfunc(cme, rb_true_to_s)) {
            return rb_true_to_s(recv);
        }
        break;
      case T_FALSE:
        if (check_cfunc(cme, rb_false_to_s)) {
            return rb_false_to_s(recv);
        }
        break;
      case T_FIXNUM:
        if (check_cfunc(cme, rb_int_to_s)) {
            return rb_fix_to_s(recv);
        }
        break;
    }
    return Qundef;
}
6300
// ZJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
{
    return vm_objtostring(reg_cfp, recv, cd);
}
6308
6309static VALUE
6310vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6311{
6312 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6313 return ary;
6314 }
6315 else {
6316 return Qundef;
6317 }
6318}
6319
6320static VALUE
6321vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6322{
6323 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6324 return hash;
6325 }
6326 else {
6327 return Qundef;
6328 }
6329}
6330
6331static VALUE
6332vm_opt_str_freeze(VALUE str, int bop, ID id)
6333{
6334 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6335 return str;
6336 }
6337 else {
6338 return Qundef;
6339 }
6340}
6341
6342/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6343#define id_cmp idCmp
6344
/* Fast path for `[lit, ...].include?(target)` on a duparray literal:
 * when Array#include? is unredefined we can skip duplicating the frozen
 * literal array entirely; otherwise materialize the dup and dispatch. */
static VALUE
vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_includes(ary, target);
    }
    else {
        VALUE args[1] = {target};

        // duparray
        RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
        VALUE dupary = rb_ary_resurrect(ary);

        return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
    }
}
6361
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
{
    return vm_opt_duparray_include_p(ec, ary, target);
}
6367
/* Fast path for `[a, b, ...].max`: scan the operand stack slice directly
 * when Array#max is unredefined, avoiding the array allocation.
 * Falls back to allocating the array and dispatching #max otherwise. */
static VALUE
vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
        if (array_len == 0) {
            return Qnil;
        }
        else {
            /* seed with the first element, then compare the remaining
             * array_len - 1 elements (i-- > 0 runs exactly that many times) */
            VALUE result = *ptr;
            rb_snum_t i = array_len - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result) > 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
    }
}
6391
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_max(ec, array_len, ptr);
}
6397
/* Fast path for `[a, b, ...].min`; mirror image of vm_opt_newarray_max. */
static VALUE
vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
        if (array_len == 0) {
            return Qnil;
        }
        else {
            /* seed with the first element, then compare the remaining
             * array_len - 1 elements */
            VALUE result = *ptr;
            rb_snum_t i = array_len - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result) < 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
    }
}
6421
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_min(ec, array_len, ptr);
}
6427
/* Fast path for `[a, b, ...].hash`: hash the stack slice directly without
 * allocating the array when Array#hash is unredefined. */
static VALUE
vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    // If Array#hash is _not_ monkeypatched, use the optimized call
    if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_hash_values(array_len, ptr);
    }
    else {
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
    }
}
6439
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    return vm_opt_newarray_hash(ec, array_len, ptr);
}
6445
6446VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6447VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6448
/* Fast path for `[a, b, ...].include?(target)`: wrap the stack slice in a
 * stack-allocated fake RArray (no heap allocation) when Array#include? is
 * unredefined; otherwise allocate the array and dispatch. */
static VALUE
vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
        struct RArray fake_ary = {RBASIC_INIT};
        VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
        return rb_ary_includes(ary, target);
    }
    else {
        VALUE args[1] = {target};
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
    }
}
6462
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
{
    return vm_opt_newarray_include_p(ec, array_len, ptr, target);
}
6468
/* Fast path for `[a, b, ...].pack(fmt [, buffer:])`: pack straight from a
 * fake stack-backed array when Array#pack is unredefined. `buffer` is
 * Qundef when no buffer: keyword was given at the call site. */
static VALUE
vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
        struct RArray fake_ary = {RBASIC_INIT};
        VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
        return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
    }
    else {
        // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
        // Setup an array with room for keyword hash.
        VALUE args[2];
        args[0] = fmt;
        int kw_splat = RB_NO_KEYWORDS;
        int argc = 1;

        if (!UNDEF_P(buffer)) {
            args[1] = rb_hash_new_with_size(1);
            rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
            kw_splat = RB_PASS_KEYWORDS;
            argc++;
        }

        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idPack, argc, args, kw_splat);
    }
}
6495
/* Non-static wrapper for JIT-generated code. */
VALUE
rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
{
    return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, buffer);
}
6501
/* pack without a buffer: keyword — Qundef marks "no buffer given". */
VALUE
rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt)
{
    return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, Qundef);
}
6507
6508#undef id_cmp
6509
/* Register inline cache `ic` as depending on constant name `id`, so that
 * redefinition of the constant can invalidate the cache later. Caller must
 * hold the VM lock (see vm_ic_track_const_chain). */
static void
vm_track_constant_cache(ID id, void *ic)
{
    rb_vm_t *vm = GET_VM();
    struct rb_id_table *const_cache = &vm->constant_cache;
    VALUE lookup_result;
    set_table *ics;

    /* find (or lazily create) the set of ICs registered under this ID */
    if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
        ics = (set_table *)lookup_result;
    }
    else {
        ics = set_init_numtable();
        rb_id_table_insert(const_cache, id, (VALUE)ics);
    }

    /* The call below to st_insert could allocate which could trigger a GC.
     * If it triggers a GC, it may free an iseq that also holds a cache to this
     * constant. If that iseq is the last iseq with a cache to this constant, then
     * it will free this ST table, which would cause an use-after-free during this
     * st_insert.
     *
     * So to fix this issue, we store the ID that is currently being inserted
     * and, in remove_from_constant_cache, we don't free the ST table for ID
     * equal to this one.
     *
     * See [Bug #20921].
     */
    vm->inserting_constant_cache_id = id;

    set_insert(ics, (st_data_t)ic);

    vm->inserting_constant_cache_id = (ID)0;
}
6544
/* Register `ic` against every name segment of a constant path (e.g. both
 * `Foo` and `Bar` for `Foo::Bar`). idNULL segments (dynamic parts) are
 * skipped; the VM lock guards the shared constant-cache tables. */
static void
vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
{
    RB_VM_LOCKING() {
        for (int i = 0; segments[i]; i++) {
            ID id = segments[i];
            if (id == idNULL) continue;
            vm_track_constant_cache(id, ic);
        }
    }
}
6556
// For JIT inlining
/* A constant cache entry is usable when the cached value may be seen by
 * this ractor (shareable, or we are on the main ractor) and the cached
 * CREF (lexical scope) matches the current one (NULL means CREF-free). */
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
{
    if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
        VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));

        return (ic_cref == NULL || // no need to check CREF
                ic_cref == vm_get_cref(reg_ep));
    }
    return false;
}
6569
/* Check a concrete cache entry against the current frame's EP. */
static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
    VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
}
6576
// YJIT needs this function to never allocate and never raise
bool
rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
    return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
}
6583
/* Store `val` into inline constant cache `ic` (owned by `iseq`), recording
 * the current CREF key and shareability, then notify YJIT of the update.
 * If const_missing fired during the lookup the result is not cacheable,
 * so the entry is cleared instead. */
static void
vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
{
    if (ruby_vm_const_missing_count > 0) {
        ruby_vm_const_missing_count = 0;
        ic->entry = NULL;
        return;
    }

    struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
    RB_OBJ_WRITE(ice, &ice->value, val);
    ice->ic_cref = vm_get_const_key_cref(reg_ep);

    /* a shareable value lets other ractors use this cache entry too */
    if (rb_ractor_shareable_p(val)) {
        RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
        ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
    }
    RB_OBJ_WRITE(iseq, &ic->entry, ice);
    RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
    unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
    rb_yjit_constant_ic_update(iseq, ic, pos);
}
6606
/* Implementation of the `opt_getconstant_path` instruction: return the
 * cached constant value on a cache hit, otherwise perform the full
 * constant lookup, register invalidation tracking, and refill the cache. */
VALUE
rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
{
    VALUE val;
    const ID *segments = ic->segments;
    struct iseq_inline_constant_cache_entry *ice = ic->entry;

    if (ice && vm_ic_hit_p(ice, GET_EP())) {
        val = ice->value;

        VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
    }
    else {
        ruby_vm_constant_cache_misses++;
        val = vm_get_ev_const_chain(ec, segments);
        vm_ic_track_const_chain(GET_CFP(), ic, segments);
        // Undo the PC increment to get the address to this instruction
        // INSN_ATTR(width) == 2
        vm_ic_update(CFP_ISEQ(GET_CFP()), ic, val, GET_EP(), CFP_PC(GET_CFP()) - 2);
    }
    return val;
}
6629
6630static VALUE
6631vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6632{
6633 rb_thread_t *th = rb_ec_thread_ptr(ec);
6634 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6635
6636 again:
6637 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6638 return is->once.value;
6639 }
6640 else if (is->once.running_thread == NULL) {
6641 VALUE val;
6642 is->once.running_thread = th;
6643 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6644 // TODO: confirm that it is shareable
6645
6646 if (RB_FL_ABLE(val)) {
6647 RB_OBJ_SET_SHAREABLE(val);
6648 }
6649
6650 RB_OBJ_WRITE(CFP_ISEQ(ec->cfp), &is->once.value, val);
6651
6652 /* is->once.running_thread is cleared by vm_once_clear() */
6653 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6654 return val;
6655 }
6656 else if (is->once.running_thread == th) {
6657 /* recursive once */
6658 return vm_once_exec((VALUE)iseq);
6659 }
6660 else {
6661 /* waiting for finish */
6662 RUBY_VM_CHECK_INTS(ec);
6664 goto again;
6665 }
6666}
6667
/* Implementation of the `opt_case_dispatch` instruction: look `key` up in
 * the compiled CDHASH and return the branch offset for the matching
 * `when` clause, `else_offset` for no match, or 0 to fall back to
 * sequential `===` checks (key type not optimizable or #=== redefined). */
static OFFSET
vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
{
    switch (OBJ_BUILTIN_TYPE(key)) {
      case -1: /* special consts (Fixnum, nil, true, false, ...) */
      case T_FLOAT:
      case T_SYMBOL:
      case T_BIGNUM:
      case T_STRING:
        if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
                                   SYMBOL_REDEFINED_OP_FLAG |
                                   INTEGER_REDEFINED_OP_FLAG |
                                   FLOAT_REDEFINED_OP_FLAG |
                                   NIL_REDEFINED_OP_FLAG |
                                   TRUE_REDEFINED_OP_FLAG |
                                   FALSE_REDEFINED_OP_FLAG |
                                   STRING_REDEFINED_OP_FLAG)) {
            st_data_t val;
            /* integral Floats hash like their Integer value (1.0 === 1) */
            if (RB_FLOAT_TYPE_P(key)) {
                double kval = RFLOAT_VALUE(key);
                if (!isinf(kval) && modf(kval, &kval) == 0.0) {
                    key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
                }
            }
            if (rb_hash_stlike_lookup(hash, key, &val)) {
                return FIX2LONG((VALUE)val);
            }
            else {
                return else_offset;
            }
        }
    }
    return 0;
}
6702
6703NORETURN(static void
6704 vm_stack_consistency_error(const rb_execution_context_t *ec,
6705 const rb_control_frame_t *,
6706 const VALUE *));
6707static void
6708vm_stack_consistency_error(const rb_execution_context_t *ec,
6709 const rb_control_frame_t *cfp,
6710 const VALUE *bp)
6711{
6712 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6713 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6714 static const char stack_consistency_error[] =
6715 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6716#if defined RUBY_DEVEL
6717 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6718 rb_str_cat_cstr(mesg, "\n");
6719 rb_str_append(mesg, rb_iseq_disasm(CFP_ISEQ(cfp)));
6721#else
6722 rb_bug(stack_consistency_error, nsp, nbp);
6723#endif
6724}
6725
6726static VALUE
6727vm_opt_plus(VALUE recv, VALUE obj)
6728{
6729 if (FIXNUM_2_P(recv, obj) &&
6730 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6731 return rb_fix_plus_fix(recv, obj);
6732 }
6733 else if (FLONUM_2_P(recv, obj) &&
6734 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6735 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6736 }
6737 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6738 return Qundef;
6739 }
6740 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6741 RBASIC_CLASS(obj) == rb_cFloat &&
6742 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6743 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6744 }
6745 else if (RBASIC_CLASS(recv) == rb_cString &&
6746 RBASIC_CLASS(obj) == rb_cString &&
6747 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6748 return rb_str_opt_plus(recv, obj);
6749 }
6750 else if (RBASIC_CLASS(recv) == rb_cArray &&
6751 RBASIC_CLASS(obj) == rb_cArray &&
6752 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6753 return rb_ary_plus(recv, obj);
6754 }
6755 else {
6756 return Qundef;
6757 }
6758}
6759
6760static VALUE
6761vm_opt_minus(VALUE recv, VALUE obj)
6762{
6763 if (FIXNUM_2_P(recv, obj) &&
6764 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6765 return rb_fix_minus_fix(recv, obj);
6766 }
6767 else if (FLONUM_2_P(recv, obj) &&
6768 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6769 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6770 }
6771 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6772 return Qundef;
6773 }
6774 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6775 RBASIC_CLASS(obj) == rb_cFloat &&
6776 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6777 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6778 }
6779 else {
6780 return Qundef;
6781 }
6782}
6783
6784static VALUE
6785vm_opt_mult(VALUE recv, VALUE obj)
6786{
6787 if (FIXNUM_2_P(recv, obj) &&
6788 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6789 return rb_fix_mul_fix(recv, obj);
6790 }
6791 else if (FLONUM_2_P(recv, obj) &&
6792 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6793 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6794 }
6795 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6796 return Qundef;
6797 }
6798 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6799 RBASIC_CLASS(obj) == rb_cFloat &&
6800 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6801 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6802 }
6803 else {
6804 return Qundef;
6805 }
6806}
6807
6808static VALUE
6809vm_opt_div(VALUE recv, VALUE obj)
6810{
6811 if (FIXNUM_2_P(recv, obj) &&
6812 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6813 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6814 }
6815 else if (FLONUM_2_P(recv, obj) &&
6816 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6817 return rb_flo_div_flo(recv, obj);
6818 }
6819 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6820 return Qundef;
6821 }
6822 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6823 RBASIC_CLASS(obj) == rb_cFloat &&
6824 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6825 return rb_flo_div_flo(recv, obj);
6826 }
6827 else {
6828 return Qundef;
6829 }
6830}
6831
6832static VALUE
6833vm_opt_mod(VALUE recv, VALUE obj)
6834{
6835 if (FIXNUM_2_P(recv, obj) &&
6836 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6837 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6838 }
6839 else if (FLONUM_2_P(recv, obj) &&
6840 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6841 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6842 }
6843 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6844 return Qundef;
6845 }
6846 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6847 RBASIC_CLASS(obj) == rb_cFloat &&
6848 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6849 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6850 }
6851 else {
6852 return Qundef;
6853 }
6854}
6855
6856static VALUE
6857vm_opt_neq(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6858{
6859 if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not_equal)) {
6860 VALUE val = opt_equality(reg_cfp, recv, obj, cd_eq);
6861
6862 if (!UNDEF_P(val)) {
6863 return RBOOL(!RTEST(val));
6864 }
6865 }
6866
6867 return Qundef;
6868}
6869
6870static VALUE
6871vm_opt_lt(VALUE recv, VALUE obj)
6872{
6873 if (FIXNUM_2_P(recv, obj) &&
6874 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6875 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6876 }
6877 else if (FLONUM_2_P(recv, obj) &&
6878 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6879 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6880 }
6881 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6882 return Qundef;
6883 }
6884 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6885 RBASIC_CLASS(obj) == rb_cFloat &&
6886 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6887 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6888 }
6889 else {
6890 return Qundef;
6891 }
6892}
6893
6894static VALUE
6895vm_opt_le(VALUE recv, VALUE obj)
6896{
6897 if (FIXNUM_2_P(recv, obj) &&
6898 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6899 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6900 }
6901 else if (FLONUM_2_P(recv, obj) &&
6902 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6903 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6904 }
6905 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6906 return Qundef;
6907 }
6908 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6909 RBASIC_CLASS(obj) == rb_cFloat &&
6910 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6911 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6912 }
6913 else {
6914 return Qundef;
6915 }
6916}
6917
6918static VALUE
6919vm_opt_gt(VALUE recv, VALUE obj)
6920{
6921 if (FIXNUM_2_P(recv, obj) &&
6922 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6923 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6924 }
6925 else if (FLONUM_2_P(recv, obj) &&
6926 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6927 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6928 }
6929 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6930 return Qundef;
6931 }
6932 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6933 RBASIC_CLASS(obj) == rb_cFloat &&
6934 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6935 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6936 }
6937 else {
6938 return Qundef;
6939 }
6940}
6941
6942static VALUE
6943vm_opt_ge(VALUE recv, VALUE obj)
6944{
6945 if (FIXNUM_2_P(recv, obj) &&
6946 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6947 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6948 }
6949 else if (FLONUM_2_P(recv, obj) &&
6950 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6951 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6952 }
6953 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6954 return Qundef;
6955 }
6956 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6957 RBASIC_CLASS(obj) == rb_cFloat &&
6958 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6959 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6960 }
6961 else {
6962 return Qundef;
6963 }
6964}
6965
6966
6967static VALUE
6968vm_opt_ltlt(VALUE recv, VALUE obj)
6969{
6970 if (SPECIAL_CONST_P(recv)) {
6971 return Qundef;
6972 }
6973 else if (RBASIC_CLASS(recv) == rb_cString &&
6974 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6975 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6976 return rb_str_buf_append(recv, obj);
6977 }
6978 else {
6979 return rb_str_concat(recv, obj);
6980 }
6981 }
6982 else if (RBASIC_CLASS(recv) == rb_cArray &&
6983 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6984 return rb_ary_push(recv, obj);
6985 }
6986 else {
6987 return Qundef;
6988 }
6989}
6990
6991static VALUE
6992vm_opt_and(VALUE recv, VALUE obj)
6993{
6994 // If recv and obj are both fixnums, then the bottom tag bit
6995 // will be 1 on both. 1 & 1 == 1, so the result value will also
6996 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6997 // will be 0, and we return Qundef.
6998 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6999
7000 if (FIXNUM_P(ret) &&
7001 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
7002 return ret;
7003 }
7004 else {
7005 return Qundef;
7006 }
7007}
7008
7009static VALUE
7010vm_opt_or(VALUE recv, VALUE obj)
7011{
7012 if (FIXNUM_2_P(recv, obj) &&
7013 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
7014 return recv | obj;
7015 }
7016 else {
7017 return Qundef;
7018 }
7019}
7020
7021static VALUE
7022vm_opt_aref(VALUE recv, VALUE obj)
7023{
7024 if (SPECIAL_CONST_P(recv)) {
7025 if (FIXNUM_2_P(recv, obj) &&
7026 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
7027 return rb_fix_aref(recv, obj);
7028 }
7029 return Qundef;
7030 }
7031 else if (RBASIC_CLASS(recv) == rb_cArray &&
7032 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
7033 if (FIXNUM_P(obj)) {
7034 return rb_ary_entry_internal(recv, FIX2LONG(obj));
7035 }
7036 else {
7037 return rb_ary_aref1(recv, obj);
7038 }
7039 }
7040 else if (RBASIC_CLASS(recv) == rb_cHash &&
7041 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
7042 return rb_hash_aref(recv, obj);
7043 }
7044 else {
7045 return Qundef;
7046 }
7047}
7048
7049static VALUE
7050vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
7051{
7052 if (SPECIAL_CONST_P(recv)) {
7053 return Qundef;
7054 }
7055 else if (RBASIC_CLASS(recv) == rb_cArray &&
7056 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
7057 FIXNUM_P(obj)) {
7058 rb_ary_store(recv, FIX2LONG(obj), set);
7059 return set;
7060 }
7061 else if (RBASIC_CLASS(recv) == rb_cHash &&
7062 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
7063 rb_hash_aset(recv, obj, set);
7064 return set;
7065 }
7066 else {
7067 return Qundef;
7068 }
7069}
7070
7071static VALUE
7072vm_opt_length(VALUE recv, int bop)
7073{
7074 if (SPECIAL_CONST_P(recv)) {
7075 return Qundef;
7076 }
7077 else if (RBASIC_CLASS(recv) == rb_cString &&
7078 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7079 if (bop == BOP_EMPTY_P) {
7080 return LONG2NUM(RSTRING_LEN(recv));
7081 }
7082 else {
7083 return rb_str_length(recv);
7084 }
7085 }
7086 else if (RBASIC_CLASS(recv) == rb_cArray &&
7087 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7088 return LONG2NUM(RARRAY_LEN(recv));
7089 }
7090 else if (RBASIC_CLASS(recv) == rb_cHash &&
7091 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7092 return INT2FIX(RHASH_SIZE(recv));
7093 }
7094 else {
7095 return Qundef;
7096 }
7097}
7098
7099static VALUE
7100vm_opt_empty_p(VALUE recv)
7101{
7102 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7103 case Qundef: return Qundef;
7104 case INT2FIX(0): return Qtrue;
7105 default: return Qfalse;
7106 }
7107}
7108
7109VALUE rb_false(VALUE obj);
7110
7111static VALUE
7112vm_opt_nil_p(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
7113{
7114 if (NIL_P(recv) &&
7115 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7116 return Qtrue;
7117 }
7118 else if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_false)) {
7119 return Qfalse;
7120 }
7121 else {
7122 return Qundef;
7123 }
7124}
7125
/* Increment a tagged Fixnum VALUE directly, handling the two edge cases:
 * -1 (all bits set) wraps to 0, and FIXNUM_MAX promotes to a Bignum. */
static VALUE
fix_succ(VALUE x)
{
    switch (x) {
      case ~0UL:
        /* 0xFFFF_FFFF == INT2FIX(-1)
         * `-1.succ` is of course 0. */
        return INT2FIX(0);
      case RSHIFT(~0UL, 1):
        /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
         * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
        return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
      default:
        /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
         * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
         * == lx*2 + ly*2 + 1
         * == (lx*2+1) + (ly*2+1) - 1
         * == x + y - 1
         *
         * Here, if we put y := INT2FIX(1):
         *
         * == x + INT2FIX(1) - 1
         * == x + 2 .
         */
        return x + 2;
    }
}
7153
7154static VALUE
7155vm_opt_succ(VALUE recv)
7156{
7157 if (FIXNUM_P(recv) &&
7158 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7159 return fix_succ(recv);
7160 }
7161 else if (SPECIAL_CONST_P(recv)) {
7162 return Qundef;
7163 }
7164 else if (RBASIC_CLASS(recv) == rb_cString &&
7165 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7166 return rb_str_succ(recv);
7167 }
7168 else {
7169 return Qundef;
7170 }
7171}
7172
7173static VALUE
7174vm_opt_not(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
7175{
7176 if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not)) {
7177 return RBOOL(!RTEST(recv));
7178 }
7179 else {
7180 return Qundef;
7181 }
7182}
7183
7184static VALUE
7185vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7186{
7187 if (SPECIAL_CONST_P(recv)) {
7188 return Qundef;
7189 }
7190 else if (RBASIC_CLASS(recv) == rb_cString &&
7191 CLASS_OF(obj) == rb_cRegexp &&
7192 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7193 return rb_reg_match(obj, recv);
7194 }
7195 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7196 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7197 return rb_reg_match(recv, obj);
7198 }
7199 else {
7200 return Qundef;
7201 }
7202}
7203
7204rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7205
7206NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7207
/* Fire one trace event (`target_event`, a single bit of pc_events) on the
 * global hook list and then on the iseq/bmethod-local list. The PC is
 * temporarily incremented around each hook invocation because the source
 * line for an event is computed from PC-1. */
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
              rb_event_flag_t pc_events, rb_event_flag_t target_event,
              rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
{
    rb_event_flag_t event = pc_events & target_event;
    VALUE self = GET_SELF();

    VM_ASSERT(rb_popcount64((uint64_t)event) == 1);

    if (local_hooks) local_hooks->running++; // make sure they don't get deleted while global hooks run

    if (event & global_hooks->events) {
        /* increment PC because source line is calculated with PC-1 */
        reg_cfp->pc++;
        vm_dtrace(event, ec);
        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
        reg_cfp->pc--;
    }

    if (local_hooks) local_hooks->running--;
    if (local_hooks != NULL) {
        if (event & local_hooks->events) {
            /* increment PC because source line is calculated with PC-1 */
            reg_cfp->pc++;
            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
            reg_cfp->pc--;
        }
    }
}
7238
/* Fire `target_event` through vm_trace_hook() iff the current PC has the
 * event compiled in AND some hook has enabled it. Used only by vm_trace(). */
#define VM_TRACE_HOOK(target_event, val) do { \
    if ((pc_events & (target_event)) & enabled_flags) { \
        vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
    } \
} while (0)
7244
/* Fetch the exception object ($!) visible in a rescue iseq: it is stored
 * in the frame's last local variable slot. */
static VALUE
rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
    VM_ASSERT(ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_RESCUE);
    return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
7252
/* Fire tracing events for the instruction at reg_cfp->pc.
 *
 * Consults three sources of hooks:
 *   - ractor-global hooks (r->pub.hooks),
 *   - iseq-targeted ("local") hooks registered for this iseq, and
 *   - bmethod-targeted hooks when this frame runs a method defined from
 *     a proc (bmethod).
 * Returns early when no hook could fire at this PC, or when a trace hook
 * is already running on this execution context (ec->trace_arg != NULL),
 * which prevents re-entrant tracing. */
static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
    const VALUE *pc = reg_cfp->pc;
    rb_ractor_t *r = rb_ec_ractor_ptr(ec);
    rb_event_flag_t enabled_flags = r->pub.hooks.events & ISEQ_TRACE_EVENTS;
    /* remember the ractor-global set before it is widened below */
    rb_event_flag_t ractor_events = enabled_flags;

    if (enabled_flags == 0 && rb_ractor_targeted_hooks_cnt(r) == 0) {
        /* nothing is being traced anywhere in this ractor */
        return;
    }
    else {
        const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
        size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
        rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
        unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
        rb_hook_list_t *local_hooks = NULL;
        /* look up hooks targeted at this specific iseq */
        if (RB_UNLIKELY(local_hooks_cnt > 0)) {
            st_data_t val;
            if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)iseq, &val)) {
                local_hooks = (rb_hook_list_t*)val;
            }
        }
        rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;

        rb_hook_list_t *bmethod_local_hooks = NULL;
        rb_event_flag_t bmethod_local_events = 0;
        const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
        enabled_flags |= iseq_local_events;

        VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);

        /* bmethod frames may additionally have hooks targeted at the
         * method definition itself (not at the underlying iseq) */
        if (bmethod_frame) {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
            VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
            unsigned int bmethod_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
            if (RB_UNLIKELY(bmethod_hooks_cnt > 0)) {
                st_data_t val;
                if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)me->def, &val)) {
                    bmethod_local_hooks = (rb_hook_list_t*)val;
                }
                if (bmethod_local_hooks) {
                    bmethod_local_events = bmethod_local_hooks->events;
                }
            }
        }

        if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
#if 0
            /* disable trace */
            /* TODO: incomplete */
            rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
#else
            /* do not disable trace because of performance problem
             * (re-enable overhead)
             */
#endif
            return;
        }
        else if (ec->trace_arg != NULL) {
            /* already tracing */
            return;
        }
        else {
            rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
            /* Note, not considering iseq local events here since the same
             * iseq could be used in multiple bmethods. */
            rb_event_flag_t bmethod_events = ractor_events | bmethod_local_events;

            if (0) {
                /* debug print, disabled by default */
                ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
                                  (int)pos,
                                  (int)pc_events,
                                  RSTRING_PTR(rb_iseq_path(iseq)),
                                  (int)rb_iseq_line_no(iseq, pos),
                                  RSTRING_PTR(rb_iseq_label(iseq)));
            }
            VM_ASSERT(reg_cfp->pc == pc);
            VM_ASSERT(pc_events != 0);

            /* check traces */
            if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
                /* b_call instruction running as a method. Fire call event. */
                vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
            }
            VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
            VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
            if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
                /* b_return instruction running as a method. Fire return event. */
                vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
            }
        }
    }
}
7351#undef VM_TRACE_HOOK
7352
7353#if VM_CHECK_MODE > 0
7354NORETURN( NOINLINE( COLDFUNC
7355void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7356
/* Initialize the VM stack canary with random bytes (VM_CHECK_MODE only).
 * The low bit is forced on so the canary is always a valid VALUE (a
 * Fixnum) if it is ever scanned as an object reference. */
void
Init_vm_stack_canary(void)
{
    /* This has to be called _after_ our PRNG is properly set up. */
    int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
    vm_stack_canary |= 0x01; // valid VALUE (Fixnum)

    vm_stack_canary_was_born = true;
    VM_ASSERT(n == 0);
}
7367
7368void
7369rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7370{
7371 /* Because a method has already been called, why not call
7372 * another one. */
7373 const char *insn = rb_insns_name(i);
7374 VALUE inspection = rb_inspect(c);
7375 const char *str = StringValueCStr(inspection);
7376
7377 rb_bug("dead canary found at %s: %s", insn, str);
7378}
7379
7380#else
7381void Init_vm_stack_canary(void) { /* nothing to do */ }
7382#endif
7383
7384
7385/* a part of the following code is generated by this ruby script:
7386
738716.times{|i|
7388 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7389 typedef_args.prepend(", ") if i != 0
7390 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7391 call_args.prepend(", ") if i != 0
7392 puts %Q{
7393static VALUE
7394builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7395{
7396 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7397 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7398}}
7399}
7400
7401puts
7402puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
740316.times{|i|
7404 puts " builtin_invoker#{i},"
7405}
7406puts "};"
7407*/
7408
/* Generated (see script above): call `funcptr` with 0 argv arguments. */
static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}
7415
/* Generated (see script above): call `funcptr` with argv[0]. */
static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}
7422
/* Generated (see script above): call `funcptr` with argv[0..1]. */
static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}
7429
/* Generated (see script above): call `funcptr` with argv[0..2]. */
static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}
7436
/* Generated (see script above): call `funcptr` with argv[0..3]. */
static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}
7443
/* Generated (see script above): call `funcptr` with argv[0..4]. */
static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}
7450
/* Generated (see script above): call `funcptr` with argv[0..5]. */
static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}
7457
/* Generated (see script above): call `funcptr` with argv[0..6]. */
static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}
7464
/* Generated (see script above): call `funcptr` with argv[0..7]. */
static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}
7471
/* Generated (see script above): call `funcptr` with argv[0..8]. */
static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}
7478
/* Generated (see script above): call `funcptr` with argv[0..9]. */
static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}
7485
/* Generated (see script above): call `funcptr` with argv[0..10]. */
static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}
7492
/* Generated (see script above): call `funcptr` with argv[0..11]. */
static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}
7499
/* Generated (see script above): call `funcptr` with argv[0..12]. */
static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}
7506
/* Generated (see script above): call `funcptr` with argv[0..13]. */
static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}
7513
/* Generated (see script above): call `funcptr` with argv[0..14]. */
static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
7520
7521typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7522
7523static builtin_invoker
7524lookup_builtin_invoker(int argc)
7525{
7526 static const builtin_invoker invokers[] = {
7527 builtin_invoker0,
7528 builtin_invoker1,
7529 builtin_invoker2,
7530 builtin_invoker3,
7531 builtin_invoker4,
7532 builtin_invoker5,
7533 builtin_invoker6,
7534 builtin_invoker7,
7535 builtin_invoker8,
7536 builtin_invoker9,
7537 builtin_invoker10,
7538 builtin_invoker11,
7539 builtin_invoker12,
7540 builtin_invoker13,
7541 builtin_invoker14,
7542 builtin_invoker15,
7543 };
7544
7545 return invokers[argc];
7546}
7547
7548static inline VALUE
7549invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7550{
7551 const bool canary_p = ISEQ_BODY(CFP_ISEQ(reg_cfp))->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7552 SETUP_CANARY(canary_p);
7553 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7554 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7555 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7556 return ret;
7557}
7558
/* invokebuiltin instruction body: thin wrapper over invoke_bf. */
static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}
7564
7565static VALUE
7566vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7567{
7568 if (0) { // debug print
7569 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7570 for (int i=0; i<bf->argc; i++) {
7571 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(CFP_ISEQ(cfp))->local_table[i+start_index]));
7572 }
7573 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7574 (void *)(uintptr_t)bf->func_ptr);
7575 }
7576
7577 if (bf->argc == 0) {
7578 return invoke_bf(ec, cfp, bf, NULL);
7579 }
7580 else {
7581 const VALUE *argv = cfp->ep - ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7582 return invoke_bf(ec, cfp, bf, argv);
7583 }
7584}
7585
7586// for __builtin_inline!()
7587
7588VALUE
7589rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7590{
7591 const rb_control_frame_t *cfp = ec->cfp;
7592 return cfp->ep[index];
7593}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_EVENT_END
Encountered an end of a class clause.
Definition event.h:40
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
Definition event.h:43
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
Definition event.h:56
#define RUBY_EVENT_CLASS
Encountered a new class.
Definition event.h:39
#define RUBY_EVENT_LINE
Encountered a new line.
Definition event.h:38
#define RUBY_EVENT_RETURN
Encountered a return statement.
Definition event.h:42
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition event.h:44
#define RUBY_EVENT_B_CALL
Encountered an yield statement.
Definition event.h:55
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_RESCUE
Encountered a rescue statement.
Definition event.h:61
static bool RB_FL_ABLE(VALUE obj)
Checks if the object is flaggable.
Definition fl_type.h:381
VALUE rb_singleton_class(VALUE obj)
Finds or creates the singleton class of the passed object.
Definition class.c:2850
VALUE rb_module_new(void)
Creates a new, anonymous module.
Definition class.c:1620
VALUE rb_class_inherited(VALUE super, VALUE klass)
Calls Class::inherited.
Definition class.c:1517
VALUE rb_define_class_id(ID id, VALUE super)
This is a very badly designed API that creates an anonymous class.
Definition class.c:1496
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:108
#define RFLOAT_VALUE
Old name of rb_float_value.
Definition double.h:28
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define rb_ary_new4
Old name of rb_ary_new_from_values.
Definition array.h:659
#define FIXABLE
Old name of RB_FIXABLE.
Definition fixnum.h:25
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define STATIC_SYM_P
Old name of RB_STATIC_SYM_P.
#define ASSUME
Old name of RBIMPL_ASSUME.
Definition assume.h:27
#define FIX2ULONG
Old name of RB_FIX2ULONG.
Definition long.h:47
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define rb_exc_new3
Old name of rb_exc_new_str.
Definition error.h:38
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define DBL2NUM
Old name of rb_float_new.
Definition double.h:29
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
Definition fl_type.h:67
#define FL_SET_RAW
Old name of RB_FL_SET_RAW.
Definition fl_type.h:126
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void rb_notimplement(void)
Definition error.c:3898
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:661
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:476
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
VALUE rb_eFatal
fatal exception.
Definition error.c:1423
VALUE rb_eNoMethodError
NoMethodError exception.
Definition error.c:1435
void rb_exc_fatal(VALUE mesg)
Raises a fatal error in the current thread.
Definition eval.c:674
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1425
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
void rb_error_frozen_object(VALUE frozen_obj)
Identical to rb_error_frozen(), except it takes arbitrary Ruby object instead of C's string.
Definition error.c:4219
VALUE rb_exc_new_str(VALUE etype, VALUE str)
Identical to rb_exc_new_cstr(), except it takes a Ruby's string instead of C's.
Definition error.c:1478
@ RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK
Warning is for checking unused block strictly.
Definition error.h:57
VALUE rb_cClass
Class class.
Definition object.c:63
VALUE rb_cArray
Array class.
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2254
VALUE rb_cRegexp
Regexp class.
Definition re.c:2664
VALUE rb_obj_frozen_p(VALUE obj)
Just calls RB_OBJ_FROZEN() inside.
Definition object.c:1325
VALUE rb_cHash
Hash class.
Definition hash.c:109
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:235
VALUE rb_inspect(VALUE obj)
Generates a human-readable textual representation of the given object.
Definition object.c:657
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:59
VALUE rb_cModule
Module class.
Definition object.c:62
VALUE rb_class_real(VALUE klass)
Finds a "real" class.
Definition object.c:226
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:894
VALUE rb_cFloat
Float class.
Definition numeric.c:198
VALUE rb_cProc
Proc class.
Definition proc.c:45
VALUE rb_cString
String class.
Definition string.c:84
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition gc.h:615
#define RB_OBJ_WRITE(old, slot, young)
Declaration of a "back" pointer.
Definition gc.h:603
VALUE rb_ary_concat(VALUE lhs, VALUE rhs)
Destructively appends the contents of latter into the end of former.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_resurrect(VALUE ary)
I guess there is no use case of this function in extension libraries, but this is a routine identical...
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_includes(VALUE ary, VALUE elem)
Queries if the passed array has the passed entry.
VALUE rb_ary_plus(VALUE lhs, VALUE rhs)
Creates a new array, concatenating the former to the latter.
VALUE rb_ary_cat(VALUE ary, const VALUE *train, long len)
Destructively appends multiple elements at the end of the array.
VALUE rb_check_array_type(VALUE obj)
Try converting an object to its array representation using its to_ary method, if any.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
void rb_ary_store(VALUE ary, long key, VALUE val)
Destructively stores the passed value to the passed array's passed index.
#define UNLIMITED_ARGUMENTS
This macro is used in conjunction with rb_check_arity().
Definition error.h:35
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object,...
Definition proc.c:1169
VALUE rb_reg_last_match(VALUE md)
This just returns the argument, stringified.
Definition re.c:1952
VALUE rb_reg_match(VALUE re, VALUE str)
This is the match operator.
Definition re.c:3796
VALUE rb_reg_nth_match(int n, VALUE md)
Queries the nth captured substring.
Definition re.c:1927
VALUE rb_reg_match_post(VALUE md)
The portion of the original string after the given match.
Definition re.c:2009
VALUE rb_reg_nth_defined(int n, VALUE md)
Identical to rb_reg_nth_match(), except it just returns Boolean.
Definition re.c:1910
VALUE rb_reg_match_pre(VALUE md)
The portion of the original string before the given match.
Definition re.c:1976
VALUE rb_reg_match_last(VALUE md)
The portion of the original string that captured at the very last.
Definition re.c:2042
VALUE rb_str_append(VALUE dst, VALUE src)
Identical to rb_str_buf_append(), except it converts the right hand side before concatenating.
Definition string.c:3834
VALUE rb_str_succ(VALUE orig)
Searches for the "successor" of a string.
Definition string.c:5379
VALUE rb_str_buf_append(VALUE dst, VALUE src)
Identical to rb_str_cat_cstr(), except it takes Ruby's string instead of C's.
Definition string.c:3800
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:4071
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1657
VALUE rb_str_length(VALUE)
Identical to rb_str_strlen(), except it returns the value in rb_cInteger.
Definition string.c:2435
VALUE rb_str_intern(VALUE str)
Identical to rb_to_symbol(), except it assumes the receiver being an instance of RString.
Definition symbol.c:968
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1514
VALUE rb_const_get(VALUE space, ID name)
Identical to rb_const_defined(), except it returns the actual defined value.
Definition variable.c:3476
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:2047
void rb_cvar_set(VALUE klass, ID name, VALUE val)
Assigns a value to a class variable.
Definition variable.c:4263
VALUE rb_cvar_find(VALUE klass, ID name, VALUE *front)
Identical to rb_cvar_get(), except it takes additional "front" pointer.
Definition variable.c:4317
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1515
void rb_const_set(VALUE space, ID name, VALUE val)
Names a constant.
Definition variable.c:3954
VALUE rb_autoload_load(VALUE space, ID name)
Kicks the autoload procedure as if it was "touched".
Definition variable.c:3306
VALUE rb_mod_name(VALUE mod)
Queries the name of a module.
Definition variable.c:136
VALUE rb_const_get_at(VALUE space, ID name)
Identical to rb_const_defined_at(), except it returns the actual defined value.
Definition variable.c:3482
void rb_set_class_path_string(VALUE klass, VALUE space, VALUE name)
Identical to rb_set_class_path(), except it accepts the name as Ruby's string instead of C's.
Definition variable.c:423
VALUE rb_ivar_defined(VALUE obj, ID name)
Queries if the instance variable is defined at the object.
Definition variable.c:2126
int rb_const_defined_at(VALUE space, ID name)
Identical to rb_const_defined(), except it doesn't look for parent classes.
Definition variable.c:3814
VALUE rb_cvar_defined(VALUE klass, ID name)
Queries if the given class has the given class variable.
Definition variable.c:4340
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:380
int rb_const_defined(VALUE space, ID name)
Queries if the constant is defined at the namespace.
Definition variable.c:3808
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:689
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1740
int rb_method_boundp(VALUE klass, ID id, int ex)
Queries if the klass has this method.
Definition vm_method.c:2310
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1164
VALUE rb_sym2str(VALUE symbol)
Obtain a frozen string representation of a symbol (not including the leading colon).
Definition symbol.c:1024
int off
Offset inside of ptr.
Definition io.h:5
int len
Length of the buffer.
Definition io.h:8
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously classified as shareable or not.
Definition ractor.h:235
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:372
#define ALLOCA_N(type, n)
Definition memory.h:292
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define MEMMOVE(p1, p2, type, n)
Handy macro to call memmove.
Definition memory.h:384
VALUE type(ANYARGS)
ANYARGS-ed function type.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static VALUE * RARRAY_PTR(VALUE ary)
Wild use of a C pointer.
Definition rarray.h:366
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:69
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
static VALUE * ROBJECT_FIELDS(VALUE obj)
Queries the instance variables.
Definition robject.h:128
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:89
#define RB_PASS_KEYWORDS
Pass keywords, final argument must be a hash of keywords.
Definition scan_args.h:72
#define RB_NO_KEYWORDS
Do not pass keywords.
Definition scan_args.h:69
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
#define ANYARGS
Functions declared using this macro take arbitrary arguments, including void.
Definition stdarg.h:64
Ruby's array.
Definition rarray.h:128
const VALUE ary[1]
Embedded elements.
Definition rarray.h:188
const VALUE * ptr
Pointer to the C array that holds the elements of the array.
Definition rarray.h:175
Definition hash.h:53
Definition iseq.h:289
Definition vm_core.h:261
const ID * segments
A null-terminated list of ids, used to represent a constant's path idNULL is used to represent the ::...
Definition vm_core.h:285
Definition vm_core.h:293
Definition vm_core.h:288
Definition method.h:63
Definition constant.h:33
CREF (Class REFerence)
Definition method.h:45
Definition class.h:37
Definition method.h:55
rb_cref_t * cref
class reference, should be marked
Definition method.h:144
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:84
SVAR (Special VARiable)
Definition imemo.h:49
const VALUE cref_or_me
class reference or rb_method_entry_t
Definition imemo.h:51
THROW_DATA.
Definition imemo.h:58
Definition vm_core.h:297
intptr_t SIGNED_VALUE
A signed integer type that has the same width with VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_FLOAT_TYPE_P(VALUE obj)
Queries if the object is an instance of rb_cFloat.
Definition value_type.h:264
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376