Ruby 4.0.0dev (2025-11-27 revision 2e770cdf773d79327cfdeb8178a1cb9b340f4560)
vm_insnhelper.c (2e770cdf773d79327cfdeb8178a1cb9b340f4560)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions. Included into vm.c.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
    #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
        /* fall through */
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
            );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent an infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting such large contents. It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

#if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
#endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
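
/* Illustration (not authoritative layout documentation, just a sketch of the
 * stores above): after vm_push_frame(..., sp, 2, stack_max) with two locals,
 * the value stack and the new frame look like:
 *
 *     sp[0]  local0 = Qnil
 *     sp[1]  local1 = Qnil
 *     sp[2]  cref_or_me   ep[-2]  (VM_ENV_DATA_INDEX_ME_CREF)
 *     sp[3]  specval      ep[-1]  (VM_ENV_DATA_INDEX_SPECVAL)
 *     sp[4]  type flags   ep[ 0]  (VM_ENV_DATA_INDEX_FLAGS)
 *
 * cfp->ep points at the flags slot and cfp->sp one past it, which is what the
 * STATIC_ASSERTs on the VM_ENV_DATA_INDEX_* constants above pin down.
 */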

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

// Push a pseudo-frame whose dummy iseq carries fname as its file name.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
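
/* For illustration, the three message shapes produced above:
 *
 *   rb_arity_error_new(2, 1, 1)  -> "wrong number of arguments (given 2, expected 1)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS)
 *                                -> "wrong number of arguments (given 0, expected 1+)"
 *   rb_arity_error_new(5, 1, 3)  -> "wrong number of arguments (given 5, expected 1..3)"
 */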

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember the env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
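
/* Ruby-level sketch of the conversion above (illustrative only):
 *
 *   def m(&blk) = blk     # block parameter forces bh -> Proc conversion
 *   m { }                 # iseq handler    -> newly made Proc
 *   m(&:to_s)             # symbol handler  -> Symbol#to_proc
 *   m(&existing_proc)     # proc handler    -> existing_proc itself
 *   m                     # no handler      -> nil
 */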

/* svar */

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
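
/* How `type` encodes the special variable, as read above and in
 * vm_backref_defined() below (an illustration derived from the branches):
 *
 *   type == 0     -> plain svar slot; `key` selects it
 *                    (VM_SVAR_LASTLINE = $_, VM_SVAR_BACKREF = $~)
 *   type & 0x01   -> character-coded back-ref: type >> 1 is one of
 *                    '&' ($&), '`' ($`), '\'' ($'), '+' ($+)
 *   otherwise     -> numbered back-ref: type >> 1 is n for $n,
 *                    so e.g. $3 arrives here as type == (3 << 1)
 */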

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
        /* fall through */
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there");
#endif
        return NULL;
    }
}

static rb_callable_method_entry_t *
env_method_entry_unchecked(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
        /* fall through */
      default:
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
        if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP_UNCHECKED(ep);
    }

    return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
        /* fall through */
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
            /* fall through */
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    #define ADD_NEW_CREF(new_cref) \
        if (new_cref_tail) { \
            RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
        } \
        else { \
            new_cref_head = new_cref; \
        } \
        new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
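
/* Ruby-level illustration of the unqualified path above: the lexical cref
 * chain is searched outward before falling back to the root cref class's
 * ancestry:
 *
 *   module M
 *     X = 1
 *     class C
 *       def self.x = X    # resolves M::X via the cref chain (C -> M),
 *     end                 # even though C.ancestors does not include M
 *   end
 */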

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
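
/* Sketch of the segment encoding consumed above (a 0-terminated ID array;
 * exact operands are an assumption about what the compiler emits):
 *
 *   A::B::C   -> { idA, idB, idC, 0 }      only the first lookup is lexical
 *   ::A::B    -> { idNULL, idA, idB, 0 }   idNULL anchors at rb_cObject, so
 *                                          every lookup is qualified
 */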


static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
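
/* Illustrative effect of the skip-loop above: singleton and eval crefs are
 * not valid class-variable bases, so
 *
 *   class C
 *     class << self
 *       def v = @@v    # @@v still resolves against C, not C's singleton class
 *     end
 *   end
 *
 * while `@@v = 1` at the top level hits the "class variable access from
 * toplevel" RuntimeError.
 */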

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        fields_obj = rb_obj_fields(obj, id);
    }

    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
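
/* The inline-cache protocol above, in pseudocode (a sketch, not the
 * authoritative definition):
 *
 *   (cached_shape_id, index) = IC or CC           // one atomic read
 *   if cached_shape_id == shape_id(fields_obj)    // hit: equal shape ids
 *       return fields[index]                      // guarantee the layout
 *   else                                          // miss: consult the shape
 *       look up (id -> index) for this shape and refill the cache
 *       (too-complex shapes fall back to an st_table lookup instead)
 *
 * A shape_id identifies the object's field layout, so a matching cached id
 * lets the read skip any per-access hash lookup.
 */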

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    rb_check_frozen(obj);

    attr_index_t index = rb_ivar_set_index(obj, id, val);
    shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

    if (!rb_shape_too_complex_p(next_shape_id)) {
        populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
    return val;
#else
    return rb_ivar_set(obj, id, val);
#endif
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    if (UNLIKELY(!rb_ractor_main_p())) {
        return Qundef;
    }

    VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
    if (UNLIKELY(!fields_obj)) {
        return Qundef;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
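
/* Illustrative outcomes of the fast path above:
 *
 *   @a = 1 where the object's shape already contains @a:
 *     shape_id == dest_shape_id -> plain indexed write, no transition
 *
 *   @a = 1 adding @a along a cached transition:
 *     dest_shape_id is a direct child of shape_id whose edge is @a and whose
 *     capacity matches -> write the slot, then adopt dest_shape_id
 *
 *   anything else (stale/invalid cached shape, capacity growth, too-complex
 *   shapes, classes and modules) -> Qundef, and the caller falls back to
 *   vm_setivar_class / vm_setivar_default / the slow path, which also
 *   performs the frozen check.
 */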

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
            break;
          case T_CLASS:
          case T_MODULE:
            if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
                return;
            }
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

VALUE
rb_vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getinstancevariable(iseq, obj, id, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}
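
/* Ruby-level view of the TAG_BREAK orphan check above (illustrative):
 *
 *   def make = proc { break }
 *   make.call               # the frame the block belongs to is gone
 *                           # -> LocalJumpError, "break from proc-closure"
 *
 *   lambda { break }.call   # lambda frame catches it: break acts as return
 */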

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-1], ary[num-2], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
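
/* Illustrative mapping from Ruby destructuring to the cases above (the exact
 * operands the compiler emits are an assumption; flag bit 0x01 = splat,
 * 0x02 = post):
 *
 *   a, b  = ary    # expandarray 2, 0x00 : pushes ary[1], then ary[0] on top
 *   a, *r = ary    # expandarray 1, 0x01 : pushes rest array, then ary[0]
 *   *r, z = ary    # expandarray 1, 0x03 : "post" order, rest array on top
 *
 * Missing elements pad with nil, so `a, b, c = [1]` leaves b and c nil.
 */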
2030
2031static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2032
2033static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2034
2035static struct rb_class_cc_entries *
2036vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2037{
2038 int initial_capa = 2;
2039 struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
2040#if VM_CHECK_MODE > 0
2041 ccs->debug_sig = ~(VALUE)ccs;
2042#endif
2043 ccs->capa = initial_capa;
2044 ccs->len = 0;
2045 ccs->cme = cme;
2046 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2047
2048 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2049 RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
2050 return ccs;
2051}
2052
2053static void
2054vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2055{
2056 if (! vm_cc_markable(cc)) {
2057 return;
2058 }
2059
2060 if (UNLIKELY(ccs->len == ccs->capa)) {
2061 RUBY_ASSERT(ccs->capa > 0);
2062 ccs->capa *= 2;
2063 ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
2064#if VM_CHECK_MODE > 0
2065 ccs->debug_sig = ~(VALUE)ccs;
2066#endif
2067 // GC?
2068 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2069 }
2070 VM_ASSERT(ccs->len < ccs->capa);
2071
2072 const int pos = ccs->len++;
2073 ccs->entries[pos].argc = vm_ci_argc(ci);
2074 ccs->entries[pos].flag = vm_ci_flag(ci);
2075 RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);
2076
2077 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2078 // for tuning
2079 // vm_mtbl_dump(klass, 0);
2080 }
2081}
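/* Editorial note: a class keeps at most one rb_class_cc_entries per method
 * id, and each entry inside it is keyed by the (argc, flag) shape of a call
 * site. For example, two hypothetical call sites
 *
 *   obj.foo(1)       # argc=1
 *   obj.foo(1, 2)    # argc=2
 *
 * end up as two entries under the same ccs, located by the linear scan in
 * vm_lookup_cc() below.
 */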
2082
2083#if VM_CHECK_MODE > 0
2084void
2085rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2086{
2087 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2088 for (int i=0; i<ccs->len; i++) {
2089 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2090 ccs->entries[i].flag,
2091 ccs->entries[i].argc);
2092 rp(ccs->entries[i].cc);
2093 }
2094}
2095
2096static int
2097vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2098{
2099 VM_ASSERT(vm_ccs_p(ccs));
2100 VM_ASSERT(ccs->len <= ccs->capa);
2101
2102 for (int i=0; i<ccs->len; i++) {
2103 const struct rb_callcache *cc = ccs->entries[i].cc;
2104
2105 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2106 VM_ASSERT(vm_cc_class_check(cc, klass));
2107 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2108 VM_ASSERT(!vm_cc_super_p(cc));
2109 VM_ASSERT(!vm_cc_refinement_p(cc));
2110 }
2111 return TRUE;
2112}
2113#endif
2114
2115const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2116
2117static void
2118vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2119{
2120 ASSERT_vm_locking();
2121
2122 if (rb_multi_ractor_p()) {
2123 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2124 // Another ractor updated the CC table while we were waiting on the VM lock.
2125 // We have to retry.
2126 return;
2127 }
2128
2129 VALUE ccs_obj = 0;
2130 rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
2131 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;
2132
2133 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2134 // Another ractor replaced that entry while we were waiting on the VM lock.
2135 return;
2136 }
2137
2138 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2139 rb_vm_cc_table_delete(new_table, mid);
2140 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2141 }
2142 else {
2143 rb_vm_cc_table_delete(cc_tbl, mid);
2144 }
2145}
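/* Editorial note: with multiple ractors the cc table is copy-on-write,
 * because readers use it without taking the VM lock. Eviction therefore never
 * mutates the shared table in place; it follows the dup-modify-publish
 * pattern above:
 *
 *   new_table = rb_vm_cc_table_dup(cc_tbl);
 *   rb_vm_cc_table_delete(new_table, mid);
 *   RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
 */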
2146
2147static const struct rb_callcache *
2148vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2149{
2150 ASSERT_vm_locking();
2151
2152 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2153
2154 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
2155
2156 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2157
2158 if (cme == NULL) {
2159 // undef or not found: can't cache the information
2160 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2161 return &vm_empty_cc;
2162 }
2163
2164 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2165 const VALUE original_cc_table = cc_tbl;
2166 if (!cc_tbl) {
2167 // Is this possible after rb_callable_method_entry ?
2168 cc_tbl = rb_vm_cc_table_create(1);
2169 }
2170 else if (rb_multi_ractor_p()) {
2171 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2172 }
2173
2174 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2175
2176 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2177
2178 VM_ASSERT(cc_tbl);
2179
2180 struct rb_class_cc_entries *ccs = NULL;
2181 {
2182 VALUE ccs_obj;
2183 if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
2184 ccs = (struct rb_class_cc_entries *)ccs_obj;
2185 }
2186 else {
2187 // TODO: required?
2188 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2189 }
2190 }
2191
2192 cme = rb_check_overloaded_cme(cme, ci);
2193
2194 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2195 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2196
2197 VM_ASSERT(vm_cc_cme(cc) != NULL);
2198 VM_ASSERT(cme->called_id == mid);
2199 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2200
2201 if (original_cc_table != cc_tbl) {
2202 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2203 }
2204
2205 return cc;
2206}
2207
2208static const struct rb_callcache *
2209vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2210{
2211 VALUE cc_tbl;
2212 struct rb_class_cc_entries *ccs;
2213retry:
2214 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2215 ccs = NULL;
2216
2217 if (cc_tbl) {
2218 // CCS data is keyed on method id, so we don't need the method id
2219 // for doing comparisons in the `for` loop below.
2220
2221 VALUE ccs_obj;
2222 if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
2223 ccs = (struct rb_class_cc_entries *)ccs_obj;
2224 const int ccs_len = ccs->len;
2225
2226 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2227 RB_VM_LOCKING() {
2228 vm_evict_cc(klass, cc_tbl, mid);
2229 }
2230 goto retry;
2231 }
2232 else {
2233 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2234
2235 // We already know the method id is correct because we had
2236 // to look up the ccs_data by method id. All we need to
2237 // compare is argc and flag
2238 unsigned int argc = vm_ci_argc(ci);
2239 unsigned int flag = vm_ci_flag(ci);
2240
2241 for (int i=0; i<ccs_len; i++) {
2242 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2243 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2244 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2245
2246 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2247
2248 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2249 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2250
2251 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2252 VM_ASSERT(ccs_cc->klass == klass);
2253 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2254
2255 return ccs_cc;
2256 }
2257 }
2258 }
2259 }
2260 }
2261
2262 RB_GC_GUARD(cc_tbl);
2263 return NULL;
2264}
2265
2266static const struct rb_callcache *
2267vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2268{
2269 const ID mid = vm_ci_mid(ci);
2270
2271 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2272 if (cc) {
2273 return cc;
2274 }
2275
2276 RB_VM_LOCKING() {
2277 if (rb_multi_ractor_p()) {
2278 // The CC may have been populated by another ractor while we were waiting on the lock,
2279 // so we must look up a second time.
2280 cc = vm_lookup_cc(klass, ci, mid);
2281 }
2282
2283 if (!cc) {
2284 cc = vm_populate_cc(klass, ci, mid);
2285 }
2286 }
2287
2288 return cc;
2289}
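/* Editorial note: vm_search_cc() is double-checked locking in the classic
 * shape, roughly:
 *
 *   cc = vm_lookup_cc(klass, ci, mid);          // lock-free fast path
 *   if (!cc) {
 *       RB_VM_LOCKING() {
 *           cc = vm_lookup_cc(klass, ci, mid);  // re-check under the lock
 *           if (!cc) cc = vm_populate_cc(klass, ci, mid);
 *       }
 *   }
 *
 * The re-check is only performed when other ractors can race on the table.
 */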
2290
2291const struct rb_callcache *
2292rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2293{
2294 const struct rb_callcache *cc;
2295
2296 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2297
2298 cc = vm_search_cc(klass, ci);
2299
2300 VM_ASSERT(cc);
2301 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2302 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2303 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2304 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2305 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2306
2307 return cc;
2308}
2309
2310static const struct rb_callcache *
2311vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2312{
2313#if USE_DEBUG_COUNTER
2314 const struct rb_callcache *old_cc = cd->cc;
2315#endif
2316
2317 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2318
2319#if OPT_INLINE_METHOD_CACHE
2320 cd->cc = cc;
2321
2322 const struct rb_callcache *empty_cc = &vm_empty_cc;
2323 if (cd_owner && cc != empty_cc) {
2324 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2325 }
2326
2327#if USE_DEBUG_COUNTER
2328 if (!old_cc || old_cc == empty_cc) {
2329 // empty
2330 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2331 }
2332 else if (old_cc == cc) {
2333 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2334 }
2335 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2336 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2337 }
2338 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2339 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2340 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2341 }
2342 else {
2343 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2344 }
2345#endif
2346#endif // OPT_INLINE_METHOD_CACHE
2347
2348 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2349 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2350
2351 return cc;
2352}
2353
2354ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2355static const struct rb_callcache *
2356vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2357{
2358 const struct rb_callcache *cc = cd->cc;
2359
2360#if OPT_INLINE_METHOD_CACHE
2361 if (LIKELY(vm_cc_class_check(cc, klass))) {
2362 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2363 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2364 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2365 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2366 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2367 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2368
2369 return cc;
2370 }
2371 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2372 }
2373 else {
2374 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2375 }
2376#endif
2377
2378 return vm_search_method_slowpath0(cd_owner, cd, klass);
2379}
2380
2381static const struct rb_callable_method_entry_struct *
2382vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2383{
2384 VALUE klass = CLASS_OF(recv);
2385 VM_ASSERT(klass != Qfalse);
2386 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2387
2388 const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
2389 return vm_cc_cme(cc);
2390}
2391
2392const struct rb_callable_method_entry_struct *
2393rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2394{
2395 return vm_search_method(cd_owner, cd, recv);
2396}
2397
2398#if __has_attribute(transparent_union)
2399typedef union {
2400 VALUE (*anyargs)(ANYARGS);
2401 VALUE (*f00)(VALUE);
2402 VALUE (*f01)(VALUE, VALUE);
2403 VALUE (*f02)(VALUE, VALUE, VALUE);
2404 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2405 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2406 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2407 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2408 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2409 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2410 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2411 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2412 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2413 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2414 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2415 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2416 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2417 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2418} __attribute__((__transparent_union__)) cfunc_type;
2419# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2420#else
2421typedef VALUE (*cfunc_type)(ANYARGS);
2422# define make_cfunc_type(f) (cfunc_type)(f)
2423#endif
2424
2425static inline int
2426check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2427{
2428 if (! me) {
2429 return false;
2430 }
2431 else {
2432 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2433 VM_ASSERT(callable_method_entry_p(me));
2434 VM_ASSERT(me->def);
2435 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2436 return false;
2437 }
2438 else {
2439#if __has_attribute(transparent_union)
2440 return me->def->body.cfunc.func == func.anyargs;
2441#else
2442 return me->def->body.cfunc.func == func;
2443#endif
2444 }
2445 }
2446}
2447
2448static inline int
2449check_method_basic_definition(const rb_callable_method_entry_t *me)
2450{
2451 return me && METHOD_ENTRY_BASIC(me);
2452}
2453
2454static inline int
2455vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2456{
2457 VM_ASSERT(iseq != NULL);
2458 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
2459 return check_cfunc(cme, func);
2460}
2461
2462bool
2463rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
2464{
2465 return check_cfunc(me, func);
2466}
2467
2468int
2469rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2470{
2471 return vm_method_cfunc_is(iseq, cd, recv, func);
2472}
2473
2474#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2475#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
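/* Editorial example: from here on, uses of check_cfunc()/vm_method_cfunc_is()
 * go through the macros above, so callers can pass a plain C function and
 * have it wrapped into cfunc_type automatically, as in opt_equality() below:
 *
 *   check_cfunc(vm_cc_cme(cc), rb_obj_equal)
 *   // expands to: check_cfunc(vm_cc_cme(cc), make_cfunc_type(rb_obj_equal))
 */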
2476
2477#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2478
2479static inline bool
2480FIXNUM_2_P(VALUE a, VALUE b)
2481{
2482 /* FIXNUM_P(a) && FIXNUM_P(b)
2483 * == ((a & 1) && (b & 1))
2484 * == a & b & 1 */
2485 SIGNED_VALUE x = a;
2486 SIGNED_VALUE y = b;
2487 SIGNED_VALUE z = x & y & 1;
2488 return z == 1;
2489}
2490
2491static inline bool
2492FLONUM_2_P(VALUE a, VALUE b)
2493{
2494#if USE_FLONUM
2495 /* FLONUM_P(a) && FLONUM_P(b)
2496 * == ((a & 3) == 2) && ((b & 3) == 2)
2497 * == !(((a ^ 2) | (b ^ 2)) & 3)
2498 */
2499 SIGNED_VALUE x = a;
2500 SIGNED_VALUE y = b;
2501 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2502 return !z;
2503#else
2504 return false;
2505#endif
2506}
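/* Editorial worked example for FLONUM_2_P: a flonum is tagged xx...x10, so v
 * is a flonum iff (v & 3) == 2, i.e. iff (v ^ 2) has both low bits clear.
 * Taking a = 0b0110 and b = 0b1010: (a ^ 2) = 0b0100, (b ^ 2) = 0b1000,
 * ((a ^ 2) | (b ^ 2)) = 0b1100, and & 3 gives 0, so both are flonums. */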
2507
2508static VALUE
2509opt_equality_specialized(VALUE recv, VALUE obj)
2510{
2511 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2512 goto compare_by_identity;
2513 }
2514 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2515 goto compare_by_identity;
2516 }
2517 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2518 goto compare_by_identity;
2519 }
2520 else if (SPECIAL_CONST_P(recv)) {
2521 // other special constants: no fast path here, fall through to Qundef
2522 }
2523 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2524 double a = RFLOAT_VALUE(recv);
2525 double b = RFLOAT_VALUE(obj);
2526
2527 return RBOOL(a == b);
2528 }
2529 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2530 if (recv == obj) {
2531 return Qtrue;
2532 }
2533 else if (RB_TYPE_P(obj, T_STRING)) {
2534 return rb_str_eql_internal(obj, recv);
2535 }
2536 }
2537 return Qundef;
2538
2539 compare_by_identity:
2540 return RBOOL(recv == obj);
2541}
2542
2543static VALUE
2544opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2545{
2546 VM_ASSERT(cd_owner != NULL);
2547
2548 VALUE val = opt_equality_specialized(recv, obj);
2549 if (!UNDEF_P(val)) return val;
2550
2551 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2552 return Qundef;
2553 }
2554 else {
2555 return RBOOL(recv == obj);
2556 }
2557}
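/* Editorial examples of what opt_equality() covers, assuming the relevant
 * `==` methods are unredefined:
 *
 *   1 == 2         # FIXNUM_2_P path (compare by identity)
 *   1.0 == 2.0     # FLONUM_2_P / T_FLOAT path
 *   :a == :b       # static symbol path (compare by identity)
 *   "a" == "ab"    # rb_str_eql_internal path
 *   x == y         # anything else: Qundef, so the caller falls back to a call
 */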
2558
2559#undef EQ_UNREDEFINED_P
2560
2561static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2562NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2563
2564static VALUE
2565opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2566{
2567 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2568
2569 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2570 return RBOOL(recv == obj);
2571 }
2572 else {
2573 return Qundef;
2574 }
2575}
2576
2577static VALUE
2578opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2579{
2580 VALUE val = opt_equality_specialized(recv, obj);
2581 if (!UNDEF_P(val)) {
2582 return val;
2583 }
2584 else {
2585 return opt_equality_by_mid_slowpath(recv, obj, mid);
2586 }
2587}
2588
2589VALUE
2590rb_equal_opt(VALUE obj1, VALUE obj2)
2591{
2592 return opt_equality_by_mid(obj1, obj2, idEq);
2593}
2594
2595VALUE
2596rb_eql_opt(VALUE obj1, VALUE obj2)
2597{
2598 return opt_equality_by_mid(obj1, obj2, idEqlP);
2599}
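/* Qundef from rb_equal_opt()/rb_eql_opt() means "no fast path"; callers must
 * fall back to real dispatch. A minimal sketch of the expected usage (the
 * same pattern rb_equal() follows):
 *
 *   VALUE r = rb_equal_opt(obj1, obj2);
 *   if (UNDEF_P(r)) r = rb_funcall(obj1, idEq, 1, obj2);
 *   return RBOOL(RTEST(r));
 */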
2600
2601extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2602extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2603
2604static VALUE
2605check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2606{
2607 switch (type) {
2608 case VM_CHECKMATCH_TYPE_WHEN:
2609 return pattern;
2610 case VM_CHECKMATCH_TYPE_RESCUE:
2611 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2612 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2613 }
2614 /* fall through */
2615 case VM_CHECKMATCH_TYPE_CASE: {
2616 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2617 }
2618 default:
2619 rb_bug("check_match: unreachable");
2620 }
2621}
2622
2623
2624static inline VALUE
2625double_cmp_lt(double a, double b)
2626{
2627 return RBOOL(a < b);
2628}
2629
2630static inline VALUE
2631double_cmp_le(double a, double b)
2632{
2633 return RBOOL(a <= b);
2634}
2635
2636static inline VALUE
2637double_cmp_gt(double a, double b)
2638{
2639 return RBOOL(a > b);
2640}
2641
2642static inline VALUE
2643double_cmp_ge(double a, double b)
2644{
2645 return RBOOL(a >= b);
2646}
2647
2648// Copied by vm_dump.c
2649static inline VALUE *
2650vm_base_ptr(const rb_control_frame_t *cfp)
2651{
2652 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2653
2654 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2655 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2656
2657 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2658 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2659 int params = ISEQ_BODY(cfp->iseq)->param.size;
2660
2661 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2662 bp += vm_ci_argc(ci);
2663 }
2664
2665 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2666 /* adjust `self' */
2667 bp += 1;
2668 }
2669#if VM_DEBUG_BP_CHECK
2670 if (bp != cfp->bp_check) {
2671 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2672 (long)(cfp->bp_check - GET_EC()->vm_stack),
2673 (long)(bp - GET_EC()->vm_stack));
2674 rb_bug("vm_base_ptr: unreachable");
2675 }
2676#endif
2677 return bp;
2678 }
2679 else {
2680 return NULL;
2681 }
2682}
2683
2684VALUE *
2685rb_vm_base_ptr(const rb_control_frame_t *cfp)
2686{
2687 return vm_base_ptr(cfp);
2688}
2689
2690/* method call processes with call_info */
2691
2692#include "vm_args.c"
2693
2694static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2695ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2696static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2697static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2698static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2699static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2700static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2701
2702static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2703
2704static VALUE
2705vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2706{
2707 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2708
2709 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2710}
2711
2712static VALUE
2713vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2714{
2715 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2716
2717 const struct rb_callcache *cc = calling->cc;
2718 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2719 int param = ISEQ_BODY(iseq)->param.size;
2720 int local = ISEQ_BODY(iseq)->local_table_size;
2721 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2722}
2723
2724bool
2725rb_simple_iseq_p(const rb_iseq_t *iseq)
2726{
2727 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2728 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2729 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2730 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2731 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2732 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2733 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2734 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2735}
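/* Editorial examples: an iseq is "simple" only when every flag above is off,
 * i.e. it takes required positional parameters only:
 *
 *   def a(x, y);     end   # simple
 *   def b(x, y = 1); end   # not simple (has_opt)
 *   def c(x, *rest); end   # not simple (has_rest)
 *   def d(x, k: 1);  end   # not simple (has_kw)
 *   def e(x, &blk);  end   # not simple (has_block)
 */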
2736
2737bool
2738rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2739{
2740 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2741 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2742 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2743 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2744 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2745 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2746 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2747 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2748}
2749
2750bool
2751rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2752{
2753 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2754 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2757 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2758 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2759 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2760}
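/* Editorial examples for the two specialized predicates above:
 *
 *   def f(x, y = 1, z = 2); end   # rb_iseq_only_optparam_p
 *   def g(x, k: 1);         end   # rb_iseq_only_kwparam_p
 */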
2761
2762#define ALLOW_HEAP_ARGV (-2)
2763#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2764
2765static inline bool
2766vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2767{
2768 vm_check_canary(GET_EC(), cfp->sp);
2769 bool ret = false;
2770
2771 if (!NIL_P(ary)) {
2772 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2773 long len = RARRAY_LEN(ary);
2774 int argc = calling->argc;
2775
2776 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2777 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2778 * a temporary array, instead of trying to keep arguments on the VM stack.
2779 */
2780 VALUE *argv = cfp->sp - argc;
2781 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2782 rb_ary_cat(argv_ary, argv, argc);
2783 rb_ary_cat(argv_ary, ptr, len);
2784 cfp->sp -= argc - 1;
2785 cfp->sp[-1] = argv_ary;
2786 calling->argc = 1;
2787 calling->heap_argv = argv_ary;
2788 RB_GC_GUARD(ary);
2789 }
2790 else {
2791 long i;
2792
2793 if (max_args >= 0 && len + argc > max_args) {
2794 /* If only a given max_args is allowed, copy up to max args.
2795 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2796 * where additional arguments are ignored.
2797 *
2798 * Also, copy up to one more argument than the maximum,
2799 * in case it is an empty keyword hash that will be removed.
2800 */
2801 calling->argc += len - (max_args - argc + 1);
2802 len = max_args - argc + 1;
2803 ret = true;
2804 }
2805 else {
2806 /* Unset heap_argv if it was set originally. This can happen
2807 * when forwarding modified arguments: heap_argv was used for
2808 * the original call, but the forwarded method does not support
2809 * heap_argv in all cases.
2810 */
2811 calling->heap_argv = 0;
2812 }
2813 CHECK_VM_STACK_OVERFLOW(cfp, len);
2814
2815 for (i = 0; i < len; i++) {
2816 *cfp->sp++ = ptr[i];
2817 }
2818 calling->argc += i;
2819 }
2820 }
2821
2822 return ret;
2823}
2824
2825static inline void
2826vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2827{
2828 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2829 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2830 const VALUE h = rb_hash_new_with_size(kw_len);
2831 VALUE *sp = cfp->sp;
2832 int i;
2833
2834 for (i=0; i<kw_len; i++) {
2835 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2836 }
2837 (sp-kw_len)[0] = h;
2838
2839 cfp->sp -= kw_len - 1;
2840 calling->argc -= kw_len - 1;
2841 calling->kw_splat = 1;
2842}
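/* Editorial sketch of the transformation above for f(k1: 1, k2: 2)
 * (kw_len == 2): the keyword values on the stack top are folded into a
 * single hash, and argc shrinks by kw_len - 1:
 *
 *   ... | recv | 1 | 2   =>   ... | recv | {k1: 1, k2: 2}
 */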
2843
2844static inline VALUE
2845vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2846{
2847 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2848 if (keyword_hash != Qnil) {
2849 /* Convert a non-hash keyword splat to a new hash */
2850 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2851 }
2852 }
2853 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2854 /* Convert a hash keyword splat to a new hash unless
2855 * a mutable keyword splat was passed.
2856 * Skip allocating new hash for empty keyword splat, as empty
2857 * keyword splat will be ignored by both callers.
2858 */
2859 keyword_hash = rb_hash_dup(keyword_hash);
2860 }
2861 return keyword_hash;
2862}
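/* Editorial examples of the dup policy above, for calls of the form f(**x):
 *
 *   f(**nil)    # stays nil; dropped later by the caller-side setup
 *   f(**obj)    # non-hash: converted with rb_to_hash_type, then duped
 *   f(**h)      # hash: duped, unless the splat is known to be mutable
 *   f(**{})     # empty hash: not duped; it will be ignored anyway
 */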
2863
2864static inline void
2865CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2866 struct rb_calling_info *restrict calling,
2867 const struct rb_callinfo *restrict ci, int max_args)
2868{
2869 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2870 if (IS_ARGS_KW_SPLAT(ci)) {
2871 // f(*a, **kw)
2872 VM_ASSERT(calling->kw_splat == 1);
2873
2874 cfp->sp -= 2;
2875 calling->argc -= 2;
2876 VALUE ary = cfp->sp[0];
2877 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2878
2879 // splat a
2880 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2881
2882 // put kw
2883 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2884 if (UNLIKELY(calling->heap_argv)) {
2885 rb_ary_push(calling->heap_argv, kwh);
2886 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2887 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2888 calling->kw_splat = 0;
2889 }
2890 }
2891 else {
2892 cfp->sp[0] = kwh;
2893 cfp->sp++;
2894 calling->argc++;
2895
2896 VM_ASSERT(calling->kw_splat == 1);
2897 }
2898 }
2899 else {
2900 calling->kw_splat = 0;
2901 }
2902 }
2903 else {
2904 // f(*a)
2905 VM_ASSERT(calling->kw_splat == 0);
2906
2907 cfp->sp -= 1;
2908 calling->argc -= 1;
2909 VALUE ary = cfp->sp[0];
2910
2911 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2912 goto check_keyword;
2913 }
2914
2915 // check the last argument
2916 VALUE last_hash, argv_ary;
2917 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2918 if (!IS_ARGS_KEYWORD(ci) &&
2919 RARRAY_LEN(argv_ary) > 0 &&
2920 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2921 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2922
2923 rb_ary_pop(argv_ary);
2924 if (!RHASH_EMPTY_P(last_hash)) {
2925 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2926 calling->kw_splat = 1;
2927 }
2928 }
2929 }
2930 else {
2931check_keyword:
2932 if (!IS_ARGS_KEYWORD(ci) &&
2933 calling->argc > 0 &&
2934 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2935 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2936
2937 if (RHASH_EMPTY_P(last_hash)) {
2938 calling->argc--;
2939 cfp->sp -= 1;
2940 }
2941 else {
2942 cfp->sp[-1] = rb_hash_dup(last_hash);
2943 calling->kw_splat = 1;
2944 }
2945 }
2946 }
2947 }
2948 }
2949 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2950 // f(**kw)
2951 VM_ASSERT(calling->kw_splat == 1);
2952 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2953
2954 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2955 cfp->sp--;
2956 calling->argc--;
2957 calling->kw_splat = 0;
2958 }
2959 else {
2960 cfp->sp[-1] = kwh;
2961 }
2962 }
2963 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2964 // f(k1:1, k2:2)
2965 VM_ASSERT(calling->kw_splat == 0);
2966
2967 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2968 * by creating a keyword hash.
2969 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2970 */
2971 vm_caller_setup_arg_kw(cfp, calling, ci);
2972 }
2973}
2974
2975#define USE_OPT_HIST 0
2976
2977#if USE_OPT_HIST
2978#define OPT_HIST_MAX 64
2979static int opt_hist[OPT_HIST_MAX+1];
2980
2981__attribute__((destructor))
2982static void
2983opt_hist_show_results_at_exit(void)
2984{
2985 for (int i=0; i<OPT_HIST_MAX; i++) {
2986 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2987 }
2988}
2989#endif
2990
2991static VALUE
2992vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2993 struct rb_calling_info *calling)
2994{
2995 const struct rb_callcache *cc = calling->cc;
2996 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2997 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2998 const int opt = calling->argc - lead_num;
2999 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3000 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3001 const int param = ISEQ_BODY(iseq)->param.size;
3002 const int local = ISEQ_BODY(iseq)->local_table_size;
3003 const int delta = opt_num - opt;
3004
3005 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3006
3007#if USE_OPT_HIST
3008 if (opt_pc < OPT_HIST_MAX) {
3009 opt_hist[opt]++;
3010 }
3011 else {
3012 opt_hist[OPT_HIST_MAX]++;
3013 }
3014#endif
3015
3016 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
3017}
3018
3019static VALUE
3020vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3021 struct rb_calling_info *calling)
3022{
3023 const struct rb_callcache *cc = calling->cc;
3024 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3025 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3026 const int opt = calling->argc - lead_num;
3027 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3028
3029 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3030
3031#if USE_OPT_HIST
3032 if (opt_pc < OPT_HIST_MAX) {
3033 opt_hist[opt]++;
3034 }
3035 else {
3036 opt_hist[OPT_HIST_MAX]++;
3037 }
3038#endif
3039
3040 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3041}
3042
3043static void
3044args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3045 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3046 VALUE *const locals);
3047
3048static VALUE
3049vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3050 struct rb_calling_info *calling)
3051{
3052 const struct rb_callcache *cc = calling->cc;
3053 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3054 int param_size = ISEQ_BODY(iseq)->param.size;
3055 int local_size = ISEQ_BODY(iseq)->local_table_size;
3056
3057 // Setting up local size and param size
3058 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3059
3060 local_size = local_size + vm_ci_argc(calling->cd->ci);
3061 param_size = param_size + vm_ci_argc(calling->cd->ci);
3062
3063 cfp->sp[0] = (VALUE)calling->cd->ci;
3064
3065 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
3066}
3067
3068static VALUE
3069vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3070 struct rb_calling_info *calling)
3071{
3072 const struct rb_callinfo *ci = calling->cd->ci;
3073 const struct rb_callcache *cc = calling->cc;
3074
3075 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
3076 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
3077
3078 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3079 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3080 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3081 const int ci_kw_len = kw_arg->keyword_len;
3082 const VALUE * const ci_keywords = kw_arg->keywords;
3083 VALUE *argv = cfp->sp - calling->argc;
3084 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3085 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3086 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3087 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3088 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3089
3090 int param = ISEQ_BODY(iseq)->param.size;
3091 int local = ISEQ_BODY(iseq)->local_table_size;
3092 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3093}
3094
3095static VALUE
3096vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3097 struct rb_calling_info *calling)
3098{
3099 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3100 const struct rb_callcache *cc = calling->cc;
3101
3102 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3103 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3104
3105 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3106 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3107 VALUE * const argv = cfp->sp - calling->argc;
3108 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3109
3110 int i;
3111 for (i=0; i<kw_param->num; i++) {
3112 klocals[i] = kw_param->default_values[i];
3113 }
3114 klocals[i] = INT2FIX(0); // kw specify flag
3115 // NOTE:
3116 // nobody checks this value, but it should be cleared because it can
3117 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3118
3119 int param = ISEQ_BODY(iseq)->param.size;
3120 int local = ISEQ_BODY(iseq)->local_table_size;
3121 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3122}
3123
3124static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3125
3126static VALUE
3127vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3128 struct rb_calling_info *calling)
3129{
3130 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3131 cfp->sp -= (calling->argc + 1);
3132 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3133 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3134}
3135
3136VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3137
3138static void
3139warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3140{
3141 rb_vm_t *vm = GET_VM();
3142 set_table *dup_check_table = vm->unused_block_warning_table;
3143 st_data_t key;
3144 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3145
3146 union {
3147 VALUE v;
3148 unsigned char b[SIZEOF_VALUE];
3149 } k1 = {
3150 .v = (VALUE)pc,
3151 }, k2 = {
3152 .v = (VALUE)cme->def,
3153 };
3154
3155 // relaxed check: deduplicate by the method's original_id only
3156 if (!strict_unused_block) {
3157 key = (st_data_t)cme->def->original_id;
3158
3159 if (set_table_lookup(dup_check_table, key)) {
3160 return;
3161 }
3162 }
3163
3164 // strict check
3165 // make unique key from pc and me->def pointer
3166 key = 0;
3167 for (int i=0; i<SIZEOF_VALUE; i++) {
3168 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3169 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3170 }
3171
3172 if (0) {
3173 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3174 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3175 fprintf(stderr, "key:%p\n", (void *)key);
3176 }
3177
3178 // duplication check
3179 if (set_insert(dup_check_table, key)) {
3180 // already shown
3181 }
3182 else if (RTEST(ruby_verbose) || strict_unused_block) {
3183 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3184 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3185
3186 if (!NIL_P(m_loc)) {
3187 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3188 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3189 }
3190 else {
3191 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3192 }
3193 }
3194}
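/* Editorial example of the diagnostic above:
 *
 *   def foo; end      # foo does not use a block
 *   foo { :unused }   # warning: the block passed to 'foo' defined at ...
 *                     # may be ignored
 *
 * With Warning[:strict_unused_block] enabled, deduplication keys on the
 * (pc, me->def) pair instead of only the method id, so each distinct call
 * site warns once.
 */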
3195
3196static inline int
3197vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3198 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3199{
3200 const struct rb_callinfo *ci = calling->cd->ci;
3201 const struct rb_callcache *cc = calling->cc;
3202
3203 VM_ASSERT((vm_ci_argc(ci), 1));
3204 VM_ASSERT(vm_cc_cme(cc) != NULL);
3205
3206 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3207 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3208 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3209 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3210 }
3211
3212 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3213 if (LIKELY(rb_simple_iseq_p(iseq))) {
3214 rb_control_frame_t *cfp = ec->cfp;
3215 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3216 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3217
3218 if (calling->argc != lead_num) {
3219 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3220 }
3221
3222 //VM_ASSERT(ci == calling->cd->ci);
3223 VM_ASSERT(cc == calling->cc);
3224
3225 if (vm_call_iseq_optimizable_p(ci, cc)) {
3226 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3227 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3228 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3229 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3230 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3231 }
3232 else {
3233 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3234 }
3235 }
3236 return 0;
3237 }
3238 else if (rb_iseq_only_optparam_p(iseq)) {
3239 rb_control_frame_t *cfp = ec->cfp;
3240
3241 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3242 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3243
3244 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3245 const int argc = calling->argc;
3246 const int opt = argc - lead_num;
3247
3248 if (opt < 0 || opt > opt_num) {
3249 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3250 }
3251
3252 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3253 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3254 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3255 vm_call_cacheable(ci, cc));
3256 }
3257 else {
3258 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3259 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3260 vm_call_cacheable(ci, cc));
3261 }
3262
3263 /* initialize opt vars for self-references */
3264 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3265 for (int i=argc; i<lead_num + opt_num; i++) {
3266 argv[i] = Qnil;
3267 }
3268 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3269 }
3270 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3271 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3272 const int argc = calling->argc;
3273 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3274
3275 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3276 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3277
3278 if (argc - kw_arg->keyword_len == lead_num) {
3279 const int ci_kw_len = kw_arg->keyword_len;
3280 const VALUE * const ci_keywords = kw_arg->keywords;
3281 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3282 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3283
3284 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3285 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3286
3287 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3288 vm_call_cacheable(ci, cc));
3289
3290 return 0;
3291 }
3292 }
3293 else if (argc == lead_num) {
3294 /* no kwarg */
3295 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3296 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3297
3298 if (klocals[kw_param->num] == INT2FIX(0)) {
3299 /* copy from default_values */
3300 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3301 vm_call_cacheable(ci, cc));
3302 }
3303
3304 return 0;
3305 }
3306 }
3307 }
3308
3309 // Called iseq is using ... param
3310 // def foo(...) # <- iseq for foo will have "forwardable"
3311 //
3312 // We want to set the `...` local to the caller's CI
3313 // foo(1, 2) # <- the ci for this should end up as `...`
3314 //
3315 // So hopefully the stack looks like:
3316 //
3317 // => 1
3318 // => 2
3319 // => *
3320 // => **
3321 // => &
3322 // => ... # <- points at `foo`s CI
3323 // => cref_or_me
3324 // => specval
3325 // => type
3326 //
3327 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3328 bool can_fastpath = true;
3329
3330 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3331 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3332 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3333 ci = vm_ci_new_runtime(
3334 vm_ci_mid(ci),
3335 vm_ci_flag(ci),
3336 vm_ci_argc(ci),
3337 vm_ci_kwarg(ci));
3338 }
3339 else {
3340 ci = forward_cd->caller_ci;
3341 }
3342 can_fastpath = false;
3343 }
3344 // C functions calling iseqs will stack-allocate a CI,
3345 // so we need to convert it to a heap-allocated one
3346 if (!vm_ci_markable(ci)) {
3347 ci = vm_ci_new_runtime(
3348 vm_ci_mid(ci),
3349 vm_ci_flag(ci),
3350 vm_ci_argc(ci),
3351 vm_ci_kwarg(ci));
3352 can_fastpath = false;
3353 }
3354 argv[param_size - 1] = (VALUE)ci;
3355 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3356 return 0;
3357 }
3358
3359 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3360}
3361
3362static void
3363vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3364{
3365 // This case is when the caller is using a ... parameter.
3366 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3367 // In this case the caller's caller's CI will be on the stack.
3368 //
3369 // For example:
3370 //
3371 // def bar(a, b); a + b; end
3372 // def foo(...); bar(...); end
3373 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3374 //
3375 // Stack layout will be:
3376 //
3377 // > 1
3378 // > 2
3379 // > CI for foo(1, 2)
3380 // > cref_or_me
3381 // > specval
3382 // > type
3383 // > receiver
3384 // > CI for foo(1, 2), via `getlocal ...`
3385 // > ( SP points here )
3386 const VALUE * lep = VM_CF_LEP(cfp);
3387
3388 const rb_iseq_t *iseq;
3389
3390 // If we're in an escaped environment (lambda for example), get the iseq
3391 // from the captured env.
3392 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3393 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3394 iseq = env->iseq;
3395 }
3396 else { // Otherwise use the lep to find the caller
3397 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3398 }
3399
3400 // Our local storage is below the args we need to copy
3401 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3402
3403 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3404 VALUE * to = cfp->sp - 1; // clobber the CI
3405
3406 if (RTEST(splat)) {
3407 to -= 1; // clobber the splat array
3408 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3409 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3410 to += RARRAY_LEN(splat);
3411 }
3412
3413 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3414 MEMCPY(to, from, VALUE, argc);
3415 cfp->sp = to + argc;
3416
3417 // Stack layout should now be:
3418 //
3419 // > 1
3420 // > 2
3421 // > CI for foo(1, 2)
3422 // > cref_or_me
3423 // > specval
3424 // > type
3425 // > receiver
3426 // > 1
3427 // > 2
3428 // > ( SP points here )
3429}
3430
3431static VALUE
3432vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3433{
3434 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3435
3436 const struct rb_callcache *cc = calling->cc;
3437 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3438 int param_size = ISEQ_BODY(iseq)->param.size;
3439 int local_size = ISEQ_BODY(iseq)->local_table_size;
3440
3441 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3442
3443 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3444 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3445}
3446
3447static VALUE
3448vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3449{
3450 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3451
3452 const struct rb_callcache *cc = calling->cc;
3453 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3454 int param_size = ISEQ_BODY(iseq)->param.size;
3455 int local_size = ISEQ_BODY(iseq)->local_table_size;
3456
3457 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3458
3459 // Setting up local size and param size
3460 local_size = local_size + vm_ci_argc(calling->cd->ci);
3461 param_size = param_size + vm_ci_argc(calling->cd->ci);
3462
3463 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3464 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3465}
3466
3467static inline VALUE
3468vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3469 int opt_pc, int param_size, int local_size)
3470{
3471 const struct rb_callinfo *ci = calling->cd->ci;
3472 const struct rb_callcache *cc = calling->cc;
3473
3474 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3475 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3476 }
3477 else {
3478 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3479 }
3480}
3481
3482static inline VALUE
3483vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3484 int opt_pc, int param_size, int local_size)
3485{
3486 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3487 VALUE *argv = cfp->sp - calling->argc;
3488 VALUE *sp = argv + param_size;
3489 cfp->sp = argv - 1 /* recv */;
3490
3491 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3492 calling->block_handler, (VALUE)me,
3493 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3494 local_size - param_size,
3495 ISEQ_BODY(iseq)->stack_max);
3496 return Qundef;
3497}
3498
3499static inline VALUE
3500vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3501{
3502 const struct rb_callcache *cc = calling->cc;
3503 unsigned int i;
3504 VALUE *argv = cfp->sp - calling->argc;
3505 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3506 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3507 VALUE *src_argv = argv;
3508 VALUE *sp_orig, *sp;
3509 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3510
3511 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3512 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3513 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3514 dst_captured->code.val = src_captured->code.val;
3515 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3516 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3517 }
3518 else {
3519 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3520 }
3521 }
3522
3523 vm_pop_frame(ec, cfp, cfp->ep);
3524 cfp = ec->cfp;
3525
3526 sp_orig = sp = cfp->sp;
3527
3528 /* push self */
3529 sp[0] = calling->recv;
3530 sp++;
3531
3532 /* copy arguments */
3533 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3534 *sp++ = src_argv[i];
3535 }
3536
3537 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3538 calling->recv, calling->block_handler, (VALUE)me,
3539 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3540 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3541 ISEQ_BODY(iseq)->stack_max);
3542
3543 cfp->sp = sp_orig;
3544
3545 return Qundef;
3546}
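/* Editorial note: the tailcall path pops the caller's frame before pushing
 * the callee's, keeping stack depth constant. It is only reachable for iseqs
 * compiled with the off-by-default option, e.g.:
 *
 *   RubyVM::InstructionSequence.compile(src, nil, nil, 1,
 *     tailcall_optimization: true, trace_instruction: false)
 */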
3547
3548static void
3549ractor_unsafe_check(void)
3550{
3551 if (!rb_ractor_main_p()) {
3552 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3553 }
3554}
3555
3556static VALUE
3557call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3558{
3559 ractor_unsafe_check();
3560 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3561 return (*f)(recv, rb_ary_new4(argc, argv));
3562}
3563
3564static VALUE
3565call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3566{
3567 ractor_unsafe_check();
3568 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3569 return (*f)(argc, argv, recv);
3570}
3571
3572static VALUE
3573call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3574{
3575 ractor_unsafe_check();
3576 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3577 return (*f)(recv);
3578}
3579
3580static VALUE
3581call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3582{
3583 ractor_unsafe_check();
3584 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3585 return (*f)(recv, argv[0]);
3586}
3587
3588static VALUE
3589call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3590{
3591 ractor_unsafe_check();
3592 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3593 return (*f)(recv, argv[0], argv[1]);
3594}
3595
3596static VALUE
3597call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3598{
3599 ractor_unsafe_check();
3600 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3601 return (*f)(recv, argv[0], argv[1], argv[2]);
3602}
3603
3604static VALUE
3605call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3606{
3607 ractor_unsafe_check();
3608 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3609 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3610}
3611
3612static VALUE
3613call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3614{
3615 ractor_unsafe_check();
3616 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3617 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3618}
3619
3620static VALUE
3621call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3622{
3623 ractor_unsafe_check();
3624 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3625 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3626}
3627
3628static VALUE
3629call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3630{
3631 ractor_unsafe_check();
3632 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3633 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3634}
3635
3636static VALUE
3637call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3638{
3639 ractor_unsafe_check();
3640 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3641 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3642}
3643
3644static VALUE
3645call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3646{
3647 ractor_unsafe_check();
3648 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3649 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3650}
3651
3652static VALUE
3653call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3654{
3655 ractor_unsafe_check();
3656 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3657 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3658}
3659
3660static VALUE
3661call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3662{
3663 ractor_unsafe_check();
3664 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3665 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3666}
3667
3668static VALUE
3669call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3670{
3671 ractor_unsafe_check();
3672 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3673 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3674}
3675
3676static VALUE
3677call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3678{
3679 ractor_unsafe_check();
3680 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3681 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3682}
3683
3684static VALUE
3685call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3686{
3687 ractor_unsafe_check();
3688 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3689 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3690}
3691
3692static VALUE
3693call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3694{
3695 ractor_unsafe_check();
3696 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3697 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3698}
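/* Editorial note: which call_cfunc_N invoker is selected follows from the
 * arity the C function was registered with (hypothetical bindings):
 *
 *   rb_define_method(klass, "plus", plus_func, 2);   // -> call_cfunc_2
 *   rb_define_method(klass, "sum",  sum_func, -1);   // -> call_cfunc_m1
 *   rb_define_method(klass, "pack", pack_func, -2);  // -> call_cfunc_m2
 *
 * The ractor_safe_* variants below are identical except that they skip
 * ractor_unsafe_check(), for methods flagged as ractor-safe.
 */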
3699
3700static VALUE
3701ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3702{
3703 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3704 return (*f)(recv, rb_ary_new4(argc, argv));
3705}
3706
3707static VALUE
3708ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3709{
3710 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3711 return (*f)(argc, argv, recv);
3712}
3713
3714static VALUE
3715ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3716{
3717 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3718 return (*f)(recv);
3719}
3720
3721static VALUE
3722ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3723{
3724 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3725 return (*f)(recv, argv[0]);
3726}
3727
3728static VALUE
3729ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3730{
3731 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3732 return (*f)(recv, argv[0], argv[1]);
3733}
3734
3735static VALUE
3736ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3737{
3738 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3739 return (*f)(recv, argv[0], argv[1], argv[2]);
3740}
3741
3742static VALUE
3743ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3744{
3745 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3746 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3747}
3748
3749static VALUE
3750ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3751{
3752 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3753 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3754}
3755
3756static VALUE
3757ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3758{
3759 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3760 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3761}
3762
3763static VALUE
3764ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3765{
3766 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3767 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3768}
3769
3770static VALUE
3771ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3772{
3773 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3774 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3775}
3776
3777static VALUE
3778ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3779{
3780 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3781 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3782}
3783
3784static VALUE
3785ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3786{
3787 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3788 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3789}
3790
3791static VALUE
3792ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3793{
3794 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3795 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3796}
3797
3798static VALUE
3799ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3800{
3801 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3802 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3803}
3804
3805static VALUE
3806ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3807{
3808 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3809 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3810}
3811
3812static VALUE
3813ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3814{
3815 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3816 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3817}
3818
3819static VALUE
3820ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3821{
3822 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3823 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3824}
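
/* Note: the ractor_safe_call_cfunc_* helpers above adapt the generic invoker
 * signature (recv, argc, argv, func) to the C method arity conventions of
 * rb_define_method:
 *   arity -2 -> f(recv, args_ary)                  (arguments as one Array)
 *   arity -1 -> f(argc, argv, recv)
 *   arity  N -> f(recv, argv[0], ..., argv[N-1])   (0 <= N <= 15)
 * Only the function-pointer cast differs between them; the matching invoker
 * is chosen once, at method definition time. */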
3825
3826static inline int
3827vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3828{
3829 const int ov_flags = RAISED_STACKOVERFLOW;
3830 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3831 if (rb_ec_raised_p(ec, ov_flags)) {
3832 rb_ec_raised_reset(ec, ov_flags);
3833 return TRUE;
3834 }
3835 return FALSE;
3836}
3837
3838#define CHECK_CFP_CONSISTENCY(func) \
3839 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3840 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
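
/* Rationale: CRuby's control frame stack grows toward lower addresses, so
 * after a CFUNC frame is pushed the caller's frame (reg_cfp) should be
 * exactly ec->cfp + 1 when the C function returns. The RAISED_STACKOVERFLOW
 * escape hatch exists because stack-overflow unwinding can leave the frames
 * out of step without that being a VM bug. */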
3841
3842static inline
3843const rb_method_cfunc_t *
3844vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3845{
3846#if VM_DEBUG_VERIFY_METHOD_CACHE
3847 switch (me->def->type) {
3848 case VM_METHOD_TYPE_CFUNC:
3849 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3850 break;
3851# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3852 METHOD_BUG(ISEQ);
3853 METHOD_BUG(ATTRSET);
3854 METHOD_BUG(IVAR);
3855 METHOD_BUG(BMETHOD);
3856 METHOD_BUG(ZSUPER);
3857 METHOD_BUG(UNDEF);
3858 METHOD_BUG(OPTIMIZED);
3859 METHOD_BUG(MISSING);
3860 METHOD_BUG(REFINED);
3861 METHOD_BUG(ALIAS);
3862# undef METHOD_BUG
3863 default:
3864 rb_bug("wrong method type: %d", me->def->type);
3865 }
3866#endif
3867 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3868}
3869
3870static VALUE
3871vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3872 int argc, VALUE *argv, VALUE *stack_bottom)
3873{
3874 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3875 const struct rb_callinfo *ci = calling->cd->ci;
3876 const struct rb_callcache *cc = calling->cc;
3877 VALUE val;
3878 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3879 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3880
3881 VALUE recv = calling->recv;
3882 VALUE block_handler = calling->block_handler;
3883 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3884
3885 if (UNLIKELY(calling->kw_splat)) {
3886 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3887 }
3888
3889 VM_ASSERT(reg_cfp == ec->cfp);
3890
3891 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3892 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3893
3894 vm_push_frame(ec, NULL, frame_type, recv,
3895 block_handler, (VALUE)me,
3896 0, ec->cfp->sp, 0, 0);
3897
3898 int len = cfunc->argc;
3899 if (len >= 0) rb_check_arity(argc, len, len);
3900
3901 reg_cfp->sp = stack_bottom;
3902 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3903
3904 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3905
3906 rb_vm_pop_frame(ec);
3907
3908 VM_ASSERT(ec->cfp->sp == stack_bottom);
3909
3910 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3911 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3912
3913 return val;
3914}
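
/* The function above is the core cfunc call protocol: fire c_call hooks, push
 * a VM_FRAME_MAGIC_CFUNC frame, arity-check fixed-arity methods, drop sp to
 * stack_bottom (logically popping receiver and arguments), run the invoker,
 * verify frame consistency, pop the frame, and fire c_return hooks. */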
3915
3916// Push a C method frame for a given cme. This is called when JIT code skipped
3917// pushing a frame but the C method reached a point where a frame is needed.
3918void
3919rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3920{
3921 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3922 rb_execution_context_t *ec = GET_EC();
3923 VALUE *sp = ec->cfp->sp;
3924 VALUE recv = *(sp - recv_idx - 1);
3925 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3926 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3927#if VM_CHECK_MODE > 0
3928 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3929 *(GET_EC()->cfp->sp) = Qfalse;
3930#endif
3931 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3932}
3933
3934// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3935bool
3936rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3937{
3938 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3939}
3940
3941static VALUE
3942vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3943{
3944 int argc = calling->argc;
3945 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3946 VALUE *argv = &stack_bottom[1];
3947
3948 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3949}
3950
3951static VALUE
3952vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3953{
3954 const struct rb_callinfo *ci = calling->cd->ci;
3955 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3956
3957 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3958 VALUE argv_ary;
3959 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3960 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3961 int argc = RARRAY_LENINT(argv_ary);
3962 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3963 VALUE *stack_bottom = reg_cfp->sp - 2;
3964
3965 VM_ASSERT(calling->argc == 1);
3966 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3967 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3968
3969 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3970 }
3971 else {
3972 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3973
3974 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3975 }
3976}
3977
3978static inline VALUE
3979vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3980{
3981 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3982 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3983
3984 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3985 return vm_call_cfunc_other(ec, reg_cfp, calling);
3986 }
3987
3988 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3989 calling->kw_splat = 0;
3990 int i;
3991 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3992 VALUE *sp = stack_bottom;
3993 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3994 for (i = 0; i < argc; i++) {
3995 *++sp = argv[i];
3996 }
3997 reg_cfp->sp = sp+1;
3998
3999 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
4000}
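
/* vm_call_cfunc_array_argv flattens a splatted argument Array back onto the
 * VM stack so the plain with_frame path can run. If the Array holds more than
 * VM_ARGC_STACK_MAX elements it bails out to vm_call_cfunc_other, which keeps
 * the arguments in a hidden heap Array instead. */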
4001
4002static inline VALUE
4003vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4004{
4005 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
4006 VALUE argv_ary = reg_cfp->sp[-1];
4007 int argc = RARRAY_LENINT(argv_ary);
4008 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4009 VALUE last_hash;
4010 int argc_offset = 0;
4011
4012 if (UNLIKELY(argc > 0 &&
4013 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
4014 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
4015 if (!RHASH_EMPTY_P(last_hash)) {
4016 return vm_call_cfunc_other(ec, reg_cfp, calling);
4017 }
4018 argc_offset++;
4019 }
4020 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
4021}
4022
4023static inline VALUE
4024vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4025{
4026 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4027 VALUE keyword_hash = reg_cfp->sp[-1];
4028
4029 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4030 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4031 }
4032
4033 return vm_call_cfunc_other(ec, reg_cfp, calling);
4034}
4035
4036static VALUE
4037vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4038{
4039 const struct rb_callinfo *ci = calling->cd->ci;
4040 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4041
4042 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4043 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4044 // f(*a)
4045 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4046 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4047 }
4048 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4049 // f(*a, **kw)
4050 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4051 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4052 }
4053 }
4054
4055 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4056 return vm_call_cfunc_other(ec, reg_cfp, calling);
4057}
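
/* Dispatch summary: the single-splat shape f(*a) and the f(*a, **kw) shape
 * get dedicated fastpaths that skip repeated argument setup; every other
 * shape funnels through vm_call_cfunc_other, which may in turn install
 * vm_call_cfunc_with_frame as the fastpath once the arguments prove simple. */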
4058
4059static VALUE
4060vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4061{
4062 const struct rb_callcache *cc = calling->cc;
4063 RB_DEBUG_COUNTER_INC(ccf_ivar);
4064 cfp->sp -= 1;
4065 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4066 return ivar;
4067}
4068
4069static VALUE
4070vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
4071{
4072 RB_DEBUG_COUNTER_INC(ccf_attrset);
4073 VALUE val = *(cfp->sp - 1);
4074 cfp->sp -= 2;
4075 attr_index_t index;
4076 shape_id_t dest_shape_id;
4077 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
4078 ID id = vm_cc_cme(cc)->def->body.attr.id;
4079 rb_check_frozen(obj);
4080 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4081 if (UNDEF_P(res)) {
4082 switch (BUILTIN_TYPE(obj)) {
4083 case T_OBJECT:
4084 break;
4085 case T_CLASS:
4086 case T_MODULE:
4087 {
4088 res = vm_setivar_class(obj, id, val, dest_shape_id, index);
4089 if (!UNDEF_P(res)) {
4090 return res;
4091 }
4092 }
4093 break;
4094 default:
4095 {
4096 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4097 if (!UNDEF_P(res)) {
4098 return res;
4099 }
4100 }
4101 }
4102 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4103 }
4104 return res;
4105}
4106
4107static VALUE
4108vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4109{
4110 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4111}
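
/* vm_call_ivar and vm_call_attrset implement attr_reader/attr_writer without
 * pushing a method frame: the call cache carries a packed (shape_id, index)
 * pair, so a cache hit is essentially a single slot load or store. On a miss,
 * vm_call_attrset_direct falls through the type-specific setters and finally
 * vm_setivar_slowpath_attr. */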
4112
4113static inline VALUE
4114vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4115{
4116 rb_proc_t *proc;
4117 VALUE val;
4118 const struct rb_callcache *cc = calling->cc;
4119 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4120 VALUE procv = cme->def->body.bmethod.proc;
4121
4122 if (!RB_OBJ_SHAREABLE_P(procv) &&
4123 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4124 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4125 }
4126
4127 /* control block frame */
4128 GetProcPtr(procv, proc);
4129 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4130
4131 return val;
4132}
4133
4134static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4135
4136static VALUE
4137vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4138{
4139 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4140
4141 const struct rb_callcache *cc = calling->cc;
4142 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4143 VALUE procv = cme->def->body.bmethod.proc;
4144
4145 if (!RB_OBJ_SHAREABLE_P(procv) &&
4146 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4147 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4148 }
4149
4150 rb_proc_t *proc;
4151 GetProcPtr(procv, proc);
4152 const struct rb_block *block = &proc->block;
4153
4154 while (vm_block_type(block) == block_type_proc) {
4155 block = vm_proc_block(block->as.proc);
4156 }
4157 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4158
4159 const struct rb_captured_block *captured = &block->as.captured;
4160 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4161 VALUE * const argv = cfp->sp - calling->argc;
4162 const int arg_size = ISEQ_BODY(iseq)->param.size;
4163
4164 int opt_pc;
4165 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4166 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4167 }
4168 else {
4169 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4170 }
4171
4172 cfp->sp = argv - 1; // -1 for the receiver
4173
4174 vm_push_frame(ec, iseq,
4175 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4176 calling->recv,
4177 VM_GUARDED_PREV_EP(captured->ep),
4178 (VALUE)cme,
4179 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4180 argv + arg_size,
4181 ISEQ_BODY(iseq)->local_table_size - arg_size,
4182 ISEQ_BODY(iseq)->stack_max);
4183
4184 return Qundef;
4185}
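
/* A bmethod (a method defined via define_method) wraps a Proc. When the
 * underlying block is an iseq block, the path above inlines it: arguments use
 * strict method-call semantics (arg_setup_method) and the block's iseq is
 * pushed as a BLOCK frame flagged BMETHOD | LAMBDA, so `return` returns from
 * the method rather than behaving like a block return. */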
4186
4187static VALUE
4188vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4189{
4190 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4191
4192 VALUE *argv;
4193 int argc;
4194 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4195 if (UNLIKELY(calling->heap_argv)) {
4196 argv = RARRAY_PTR(calling->heap_argv);
4197 cfp->sp -= 2;
4198 }
4199 else {
4200 argc = calling->argc;
4201 argv = ALLOCA_N(VALUE, argc);
4202 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4203 cfp->sp += - argc - 1;
4204 }
4205
4206 return vm_call_bmethod_body(ec, calling, argv);
4207}
4208
4209static VALUE
4210vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4211{
4212 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4213
4214 const struct rb_callcache *cc = calling->cc;
4215 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4216 VALUE procv = cme->def->body.bmethod.proc;
4217 rb_proc_t *proc;
4218 GetProcPtr(procv, proc);
4219 const struct rb_block *block = &proc->block;
4220
4221 while (vm_block_type(block) == block_type_proc) {
4222 block = vm_proc_block(block->as.proc);
4223 }
4224 if (vm_block_type(block) == block_type_iseq) {
4225 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4226 return vm_call_iseq_bmethod(ec, cfp, calling);
4227 }
4228
4229 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4230 return vm_call_noniseq_bmethod(ec, cfp, calling);
4231}
4232
4233VALUE
4234rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4235{
4236 VALUE klass = current_class;
4237
4238 /* for a prepended Module, start from the origin (cover) class */
4239 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4240 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4241 klass = RBASIC_CLASS(klass);
4242 }
4243
4244 while (RTEST(klass)) {
4245 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4246 if (owner == target_owner) {
4247 return klass;
4248 }
4249 klass = RCLASS_SUPER(klass);
4250 }
4251
4252 return current_class; /* maybe module function */
4253}
4254
4255static const rb_callable_method_entry_t *
4256aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4257{
4258 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4259 const rb_callable_method_entry_t *cme;
4260
4261 if (orig_me->defined_class == 0) {
4262 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4263 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4264 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4265
4266 if (me->def->reference_count == 1) {
4267 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4268 }
4269 else {
4270 rb_method_definition_t *def =
4271 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4272 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4273 }
4274 }
4275 else {
4276 cme = (const rb_callable_method_entry_t *)orig_me;
4277 }
4278
4279 VM_ASSERT(callable_method_entry_p(cme));
4280 return cme;
4281}
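
/* Alias resolution is cached: once the original entry is complemented with a
 * concrete defined_class, the resulting cme is written back into the alias
 * definition when this def has a single user (reference_count == 1);
 * otherwise a fresh ALIAS definition is created so other users of the shared
 * definition are unaffected. */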
4282
4283const rb_callable_method_entry_t *
4284rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4285{
4286 return aliased_callable_method_entry(me);
4287}
4288
4289static VALUE
4290vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4291{
4292 calling->cc = &VM_CC_ON_STACK(Qundef,
4293 vm_call_general,
4294 {{0}},
4295 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4296
4297 return vm_call_method_each_type(ec, cfp, calling);
4298}
4299
4300static enum method_missing_reason
4301ci_missing_reason(const struct rb_callinfo *ci)
4302{
4303 enum method_missing_reason stat = MISSING_NOENTRY;
4304 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4305 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4306 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4307 return stat;
4308}
4309
4310static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4311
4312static VALUE
4313vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4314 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4315{
4316 ASSUME(calling->argc >= 0);
4317
4318 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4319 int argc = calling->argc;
4320 VALUE recv = calling->recv;
4321 VALUE klass = CLASS_OF(recv);
4322 ID mid = rb_check_id(&symbol);
4323 flags |= VM_CALL_OPT_SEND;
4324
4325 if (UNLIKELY(! mid)) {
4326 mid = idMethodMissing;
4327 missing_reason = ci_missing_reason(ci);
4328 ec->method_missing_reason = missing_reason;
4329
4330 VALUE argv_ary;
4331 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4332 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4333 rb_ary_unshift(argv_ary, symbol);
4334
4335 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4336 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4337 VALUE exc = rb_make_no_method_exception(
4338 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4339
4340 rb_exc_raise(exc);
4341 }
4342 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4343 }
4344 else {
4345 /* E.g. when argc == 2
4346 *
4347 * | | | | TOPN
4348 * | | +------+
4349 * | | +---> | arg1 | 0
4350 * +------+ | +------+
4351 * | arg1 | -+ +-> | arg0 | 1
4352 * +------+ | +------+
4353 * | arg0 | ---+ | sym | 2
4354 * +------+ +------+
4355 * | recv | | recv | 3
4356 * --+------+--------+------+------
4357 */
4358 int i = argc;
4359 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4360 INC_SP(1);
4361 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4362 argc = ++calling->argc;
4363
4364 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4365 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4366 TOPN(i) = symbol;
4367 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4368 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4369 VALUE exc = rb_make_no_method_exception(
4370 rb_eNoMethodError, 0, recv, argc, argv, priv);
4371
4372 rb_exc_raise(exc);
4373 }
4374 else {
4375 TOPN(i) = rb_str_intern(symbol);
4376 }
4377 }
4378 }
4379
4380 struct rb_forwarding_call_data new_fcd = {
4381 .cd = {
4382 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4383 .cc = NULL,
4384 },
4385 .caller_ci = NULL,
4386 };
4387
4388 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4389 calling->cd = &new_fcd.cd;
4390 }
4391 else {
4392 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4393 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4394 new_fcd.caller_ci = caller_ci;
4395 calling->cd = (struct rb_call_data *)&new_fcd;
4396 }
4397 calling->cc = &VM_CC_ON_STACK(klass,
4398 vm_call_general,
4399 { .method_missing_reason = missing_reason },
4400 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4401
4402 if (flags & VM_CALL_FCALL) {
4403 return vm_call_method(ec, reg_cfp, calling);
4404 }
4405
4406 const struct rb_callcache *cc = calling->cc;
4407 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4408
4409 if (vm_cc_cme(cc) != NULL) {
4410 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4411 case METHOD_VISI_PUBLIC: /* likely */
4412 return vm_call_method_each_type(ec, reg_cfp, calling);
4413 case METHOD_VISI_PRIVATE:
4414 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4415 break;
4416 case METHOD_VISI_PROTECTED:
4417 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4418 break;
4419 default:
4420 VM_UNREACHABLE(vm_call_method);
4421 }
4422 return vm_call_method_missing(ec, reg_cfp, calling);
4423 }
4424
4425 return vm_call_method_nome(ec, reg_cfp, calling);
4426}
4427
4428static VALUE
4429vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4430{
4431 const struct rb_callinfo *ci = calling->cd->ci;
4432 int i;
4433 VALUE sym;
4434
4435 i = calling->argc - 1;
4436
4437 if (calling->argc == 0) {
4438 rb_raise(rb_eArgError, "no method name given");
4439 }
4440
4441 sym = TOPN(i);
4442 /* E.g. when i == 2
4443 *
4444 * | | | | TOPN
4445 * +------+ | |
4446 * | arg1 | ---+ | | 0
4447 * +------+ | +------+
4448 * | arg0 | -+ +-> | arg1 | 1
4449 * +------+ | +------+
4450 * | sym | +---> | arg0 | 2
4451 * +------+ +------+
4452 * | recv | | recv | 3
4453 * --+------+--------+------+------
4454 */
4455 /* shift arguments */
4456 if (i > 0) {
4457 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4458 }
4459 calling->argc -= 1;
4460 DEC_SP(1);
4461
4462 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4463}
4464
4465static VALUE
4466vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4467{
4468 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4469 const struct rb_callinfo *ci = calling->cd->ci;
4470 int flags = VM_CALL_FCALL;
4471 VALUE sym;
4472
4473 VALUE argv_ary;
4474 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4475 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4476 sym = rb_ary_shift(argv_ary);
4477 flags |= VM_CALL_ARGS_SPLAT;
4478 if (calling->kw_splat) {
4479 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4480 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4481 calling->kw_splat = 0;
4482 }
4483 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4484 }
4485
4486 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4487 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4488}
4489
4490static VALUE
4491vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4492{
4493 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4494 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4495}
4496
4497static VALUE
4498vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4499{
4500 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4501
4502 const struct rb_callinfo *ci = calling->cd->ci;
4503 int flags = vm_ci_flag(ci);
4504
4505 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4506 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4507 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4508 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4509 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4510 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4511 }
4512
4513 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4514 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4515}
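
/* Optimized send: the simple path pops the method-name Symbol off the stack
 * and re-enters dispatch through vm_call_symbol with VM_CALL_FCALL set, so
 * (like Kernel#send) private methods stay callable. The complex path --
 * splat, kw splat, kwargs, or forwarding -- normalizes the arguments first,
 * possibly into a heap Array, before doing the same. */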
4516
4517static VALUE
4518vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4519 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4520{
4521 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4522
4523 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4524 unsigned int argc, flag;
4525
4526 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4527 argc = ++calling->argc;
4528
4529 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4530 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4531 vm_check_canary(ec, reg_cfp->sp);
4532 if (argc > 1) {
4533 MEMMOVE(argv+1, argv, VALUE, argc-1);
4534 }
4535 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4536 INC_SP(1);
4537
4538 ec->method_missing_reason = reason;
4539
4540 struct rb_forwarding_call_data new_fcd = {
4541 .cd = {
4542 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4543 .cc = NULL,
4544 },
4545 .caller_ci = NULL,
4546 };
4547
4548 if (!(flag & VM_CALL_FORWARDING)) {
4549 calling->cd = &new_fcd.cd;
4550 }
4551 else {
4552 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4553 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4554 new_fcd.caller_ci = caller_ci;
4555 calling->cd = (struct rb_call_data *)&new_fcd;
4556 }
4557
4558 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4559 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4560 return vm_call_method(ec, reg_cfp, calling);
4561}
4562
4563static VALUE
4564vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4565{
4566 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4567}
4568
4569static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4570static VALUE
4571vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4572{
4573 klass = RCLASS_SUPER(klass);
4574
4575 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4576 if (cme == NULL) {
4577 return vm_call_method_nome(ec, cfp, calling);
4578 }
4579 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4580 cme->def->body.refined.orig_me) {
4581 cme = refined_method_callable_without_refinement(cme);
4582 }
4583
4584 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4585
4586 return vm_call_method_each_type(ec, cfp, calling);
4587}
4588
4589static inline VALUE
4590find_refinement(VALUE refinements, VALUE klass)
4591{
4592 if (NIL_P(refinements)) {
4593 return Qnil;
4594 }
4595 return rb_hash_lookup(refinements, klass);
4596}
4597
4598PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4599static rb_control_frame_t *
4600current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4601{
4602 rb_control_frame_t *top_cfp = cfp;
4603
4604 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4605 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4606
4607 do {
4608 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4609 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4610 /* TODO: orphan block */
4611 return top_cfp;
4612 }
4613 } while (cfp->iseq != local_iseq);
4614 }
4615 return cfp;
4616}
4617
4618static const rb_callable_method_entry_t *
4619refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4620{
4621 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4622 const rb_callable_method_entry_t *cme;
4623
4624 if (orig_me->defined_class == 0) {
4625 cme = NULL;
4626 rb_notimplement();
4627 }
4628 else {
4629 cme = (const rb_callable_method_entry_t *)orig_me;
4630 }
4631
4632 VM_ASSERT(callable_method_entry_p(cme));
4633
4634 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4635 cme = NULL;
4636 }
4637
4638 return cme;
4639}
4640
4641static const rb_callable_method_entry_t *
4642search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4643{
4644 ID mid = vm_ci_mid(calling->cd->ci);
4645 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4646 const struct rb_callcache * const cc = calling->cc;
4647 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4648
4649 for (; cref; cref = CREF_NEXT(cref)) {
4650 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4651 if (NIL_P(refinement)) continue;
4652
4653 const rb_callable_method_entry_t *const ref_me =
4654 rb_callable_method_entry(refinement, mid);
4655
4656 if (ref_me) {
4657 if (vm_cc_call(cc) == vm_call_super_method) {
4658 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4659 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4660 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4661 continue;
4662 }
4663 }
4664
4665 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4666 cme->def != ref_me->def) {
4667 cme = ref_me;
4668 }
4669 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4670 return cme;
4671 }
4672 }
4673 else {
4674 return NULL;
4675 }
4676 }
4677
4678 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4679 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4680 }
4681 else {
4682 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4683 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4684 return cme;
4685 }
4686}
4687
4688static VALUE
4689vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4690{
4691 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4692
4693 if (ref_cme) {
4694 if (calling->cd->cc) {
4695 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4696 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4697 return vm_call_method(ec, cfp, calling);
4698 }
4699 else {
4700 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4701 calling->cc = ref_cc;
4702 return vm_call_method(ec, cfp, calling);
4703 }
4704 }
4705 else {
4706 return vm_call_method_nome(ec, cfp, calling);
4707 }
4708}
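
/* search_refined_method walks the cref (lexical scope) chain for an activated
 * refinement of the method's owner. For super calls it skips the refinement
 * defining the currently running method, so super inside a refined method
 * reaches the next definition. With no usable refinement, dispatch falls back
 * to the refined method's original entry or the superclass chain. */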
4709
4710static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4711
4712NOINLINE(static VALUE
4713 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4714 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4715
4716static VALUE
4717vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4718 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4719{
4720 int argc = calling->argc;
4721
4722 /* remove self */
4723 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4724 DEC_SP(1);
4725
4726 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4727}
4728
4729static VALUE
4730vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4731{
4732 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4733
4734 const struct rb_callinfo *ci = calling->cd->ci;
4735 VALUE procval = calling->recv;
4736 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4737}
4738
4739static VALUE
4740vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4741{
4742 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4743
4744 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4745 const struct rb_callinfo *ci = calling->cd->ci;
4746
4747 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4748 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4749 }
4750 else {
4751 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4752 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4753 return vm_call_general(ec, reg_cfp, calling);
4754 }
4755}
4756
4757static VALUE
4758vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4759{
4760 VALUE recv = calling->recv;
4761
4762 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4763 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4764 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4765
4766 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4767 return internal_RSTRUCT_GET(recv, off);
4768}
4769
4770static VALUE
4771vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4772{
4773 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4774
4775 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4776 reg_cfp->sp -= 1;
4777 return ret;
4778}
4779
4780static VALUE
4781vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4782{
4783 VALUE recv = calling->recv;
4784
4785 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4786 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4787 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4788
4789 rb_check_frozen(recv);
4790
4791 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4792 internal_RSTRUCT_SET(recv, off, val);
4793
4794 return val;
4795}
4796
4797static VALUE
4798vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4799{
4800 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4801
4802 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4803 reg_cfp->sp -= 2;
4804 return ret;
4805}
4806
4807NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4808 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4809
4810#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4811 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4812 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4813 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4814 var = func; \
4815 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4816 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4817 } \
4818 else { \
4819 nohook; \
4820 var = func; \
4821 }
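
/* VM_CALL_METHOD_ATTR wraps frame-less optimized calls (ivar get/set, Struct
 * aref/aset) so c_call/c_return TracePoint events still fire when such hooks
 * are enabled; when they are not, the `nohook` argument runs instead, which
 * is where the fastpath gets installed. */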
4822
4823static VALUE
4824vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4825 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4826{
4827 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4828 case OPTIMIZED_METHOD_TYPE_SEND:
4829 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4830 return vm_call_opt_send(ec, cfp, calling);
4831 case OPTIMIZED_METHOD_TYPE_CALL:
4832 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4833 return vm_call_opt_call(ec, cfp, calling);
4834 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4835 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4836 return vm_call_opt_block_call(ec, cfp, calling);
4837 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4838 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4839 rb_check_arity(calling->argc, 0, 0);
4840
4841 VALUE v;
4842 VM_CALL_METHOD_ATTR(v,
4843 vm_call_opt_struct_aref(ec, cfp, calling),
4844 set_vm_cc_ivar(cc); \
4845 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4846 return v;
4847 }
4848 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4849 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4850 rb_check_arity(calling->argc, 1, 1);
4851
4852 VALUE v;
4853 VM_CALL_METHOD_ATTR(v,
4854 vm_call_opt_struct_aset(ec, cfp, calling),
4855 set_vm_cc_ivar(cc); \
4856 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4857 return v;
4858 }
4859 default:
4860 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4861 }
4862}
4863
4864static VALUE
4865vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4866{
4867 const struct rb_callinfo *ci = calling->cd->ci;
4868 const struct rb_callcache *cc = calling->cc;
4869 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4870 VALUE v;
4871
4872 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4873
4874 switch (cme->def->type) {
4875 case VM_METHOD_TYPE_ISEQ:
4876 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4877 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4878 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4879 }
4880 else {
4881 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4882 return vm_call_iseq_setup(ec, cfp, calling);
4883 }
4884
4885 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4886 case VM_METHOD_TYPE_CFUNC:
4887 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4888 return vm_call_cfunc(ec, cfp, calling);
4889
4890 case VM_METHOD_TYPE_ATTRSET:
4891 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4892
4893 rb_check_arity(calling->argc, 1, 1);
4894
4895 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4896
4897 if (vm_cc_markable(cc)) {
4898 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4899 VM_CALL_METHOD_ATTR(v,
4900 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4901 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4902 }
4903 else {
4904 cc = &((struct rb_callcache) {
4905 .flags = T_IMEMO |
4906 (imemo_callcache << FL_USHIFT) |
4907 VM_CALLCACHE_UNMARKABLE |
4908 VM_CALLCACHE_ON_STACK,
4909 .klass = cc->klass,
4910 .cme_ = cc->cme_,
4911 .call_ = cc->call_,
4912 .aux_ = {
4913 .attr = {
4914 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4915 }
4916 },
4917 });
4918
4919 VM_CALL_METHOD_ATTR(v,
4920 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4921 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4922 }
4923 return v;
4924
4925 case VM_METHOD_TYPE_IVAR:
4926 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4927 rb_check_arity(calling->argc, 0, 0);
4928 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4929 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4930 VM_CALL_METHOD_ATTR(v,
4931 vm_call_ivar(ec, cfp, calling),
4932 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4933 return v;
4934
4935 case VM_METHOD_TYPE_MISSING:
4936 vm_cc_method_missing_reason_set(cc, 0);
4937 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4938 return vm_call_method_missing(ec, cfp, calling);
4939
4940 case VM_METHOD_TYPE_BMETHOD:
4941 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4942 return vm_call_bmethod(ec, cfp, calling);
4943
4944 case VM_METHOD_TYPE_ALIAS:
4945 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4946 return vm_call_alias(ec, cfp, calling);
4947
4948 case VM_METHOD_TYPE_OPTIMIZED:
4949 return vm_call_optimized(ec, cfp, calling, ci, cc);
4950
4951 case VM_METHOD_TYPE_UNDEF:
4952 break;
4953
4954 case VM_METHOD_TYPE_ZSUPER:
4955 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4956
4957 case VM_METHOD_TYPE_REFINED:
4958 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4959 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4960 return vm_call_refined(ec, cfp, calling);
4961 }
4962
4963 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4964}
4965
4966NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4967
4968static VALUE
4969vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4970{
4971 /* method missing */
4972 const struct rb_callinfo *ci = calling->cd->ci;
4973 const int stat = ci_missing_reason(ci);
4974
4975 if (vm_ci_mid(ci) == idMethodMissing) {
4976 if (UNLIKELY(calling->heap_argv)) {
4977 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4978 }
4979 else {
4980 rb_control_frame_t *reg_cfp = cfp;
4981 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4982 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4983 }
4984 }
4985 else {
4986 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4987 }
4988}
4989
4990/* Protected method calls and super invocations need to check that the receiver
4991 * (self for super) inherits the module on which the method is defined.
4992 * In the case of refinements, it should consider the original class, not the
4993 * refinement.
4994 */
4995static VALUE
4996vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4997{
4998 VALUE defined_class = me->defined_class;
4999 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5000 return NIL_P(refined_class) ? defined_class : refined_class;
5001}
5002
5003static inline VALUE
5004vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
5005{
5006 const struct rb_callinfo *ci = calling->cd->ci;
5007 const struct rb_callcache *cc = calling->cc;
5008
5009 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
5010
5011 if (vm_cc_cme(cc) != NULL) {
5012 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
5013 case METHOD_VISI_PUBLIC: /* likely */
5014 return vm_call_method_each_type(ec, cfp, calling);
5015
5016 case METHOD_VISI_PRIVATE:
5017 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
5018 enum method_missing_reason stat = MISSING_PRIVATE;
5019 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
5020
5021 vm_cc_method_missing_reason_set(cc, stat);
5022 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
5023 return vm_call_method_missing(ec, cfp, calling);
5024 }
5025 return vm_call_method_each_type(ec, cfp, calling);
5026
5027 case METHOD_VISI_PROTECTED:
5028 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
5029 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
5030 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
5031 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
5032 return vm_call_method_missing(ec, cfp, calling);
5033 }
5034 else {
5035 /* caching method info to dummy cc */
5036 VM_ASSERT(vm_cc_cme(cc) != NULL);
5037 struct rb_callcache cc_on_stack = *cc;
5038 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
5039 calling->cc = &cc_on_stack;
5040 return vm_call_method_each_type(ec, cfp, calling);
5041 }
5042 }
5043 return vm_call_method_each_type(ec, cfp, calling);
5044
5045 default:
5046 rb_bug("unreachable");
5047 }
5048 }
5049 else {
5050 return vm_call_method_nome(ec, cfp, calling);
5051 }
5052}
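
/* Visibility dispatch: public methods go straight through. Private methods
 * fall back to method_missing unless the call is receiverless (FCALL).
 * Protected methods require self to be a kind_of? the defining class, and on
 * success use an unmarkable on-stack copy of the call cache so the
 * visibility-checked result is not cached globally. */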
5053
5054static VALUE
5055vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5056{
5057 RB_DEBUG_COUNTER_INC(ccf_general);
5058 return vm_call_method(ec, reg_cfp, calling);
5059}
5060
5061void
5062rb_vm_cc_general(const struct rb_callcache *cc)
5063{
5064 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
5065 VM_ASSERT(cc != vm_cc_empty());
5066
5067 *(vm_call_handler *)&cc->call_ = vm_call_general;
5068}
5069
5070static VALUE
5071vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5072{
5073 RB_DEBUG_COUNTER_INC(ccf_super_method);
5074
5075 // The line below exists only to make this function different from `vm_call_general`: some compilers
5076 // (we observed this with VC) can merge identical functions, giving both the same address.
5077 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay distinct.
5078 if (ec == NULL) rb_bug("unreachable");
5079
5080 /* this check is required to distinguish this function from other call handlers. */
5081 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
5082 return vm_call_method(ec, reg_cfp, calling);
5083}
5084
5085/* super */
5086
5087static inline VALUE
5088vm_search_normal_superclass(VALUE klass)
5089{
5090 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5091 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5092 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5093 klass = RBASIC(klass)->klass;
5094 }
5095 klass = RCLASS_ORIGIN(klass);
5096 return RCLASS_SUPER(klass);
5097}
5098
5099NORETURN(static void vm_super_outside(void));
5100
5101static void
5102vm_super_outside(void)
5103{
5104 rb_raise(rb_eNoMethodError, "super called outside of method");
5105}
5106
5107static const struct rb_callcache *
5108empty_cc_for_super(void)
5109{
5110 return &vm_empty_cc_for_super;
5111}
5112
5113static const struct rb_callcache *
5114vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5115{
5116 VALUE current_defined_class;
5117 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5118
5119 if (!me) {
5120 vm_super_outside();
5121 }
5122
5123 current_defined_class = vm_defined_class_for_protected_call(me);
5124
5125 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5126 reg_cfp->iseq != method_entry_iseqptr(me) &&
5127 !rb_obj_is_kind_of(recv, current_defined_class)) {
5128 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5129 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5130
5131 if (m) { /* not bound UnboundMethod */
5132 rb_raise(rb_eTypeError,
5133 "self has wrong type to call super in this context: "
5134 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5135 rb_obj_class(recv), m);
5136 }
5137 }
5138
5139 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5140 rb_raise(rb_eRuntimeError,
5141 "implicit argument passing of super from method defined"
5142 " by define_method() is not supported."
5143 " Specify all arguments explicitly.");
5144 }
5145
5146 ID mid = me->def->original_id;
5147
5148 if (!vm_ci_markable(cd->ci)) {
5149 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5150 }
5151 else {
5152 // update iseq. really? (TODO)
5153 cd->ci = vm_ci_new_runtime(mid,
5154 vm_ci_flag(cd->ci),
5155 vm_ci_argc(cd->ci),
5156 vm_ci_kwarg(cd->ci));
5157
5158 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5159 }
5160
5161 const struct rb_callcache *cc;
5162
5163 VALUE klass = vm_search_normal_superclass(me->defined_class);
5164
5165 if (!klass) {
5166 /* bound instance method of module */
5167 cc = vm_cc_new(Qundef, NULL, vm_call_method_missing, cc_type_super);
5168 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5169 }
5170 else {
5171 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5172 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5173
5174 // define_method can cache for different method id
5175 if (cached_cme == NULL) {
5176 // empty_cc_for_super is not markable object
5177 cd->cc = empty_cc_for_super();
5178 }
5179 else if (cached_cme->called_id != mid) {
5180 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5181 if (cme) {
5182 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5183 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5184 }
5185 else {
5186 cd->cc = cc = empty_cc_for_super();
5187 }
5188 }
5189 else {
5190 switch (cached_cme->def->type) {
5191 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5192 case VM_METHOD_TYPE_REFINED:
5193 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5194 case VM_METHOD_TYPE_ATTRSET:
5195 case VM_METHOD_TYPE_IVAR:
5196 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5197 break;
5198 default:
5199 break; // use fastpath
5200 }
5201 }
5202 }
5203
5204 VM_ASSERT((vm_cc_cme(cc), true));
5205
5206 return cc;
5207}
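
/* Super search in short: normalize the frame's defining class (unwrapping
 * refinement ICLASSes and prepend origins), take its superclass, rewrite the
 * call info to the method's original_id, and look the method up there.
 * Refined and attr entries are forced onto vm_call_super_method rather than a
 * fastpath, since their caches rely on assumptions a super call would break. */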
5208
5209/* yield */
5210
5211static inline int
5212block_proc_is_lambda(const VALUE procval)
5213{
5214 rb_proc_t *proc;
5215
5216 if (procval) {
5217 GetProcPtr(procval, proc);
5218 return proc->is_lambda;
5219 }
5220 else {
5221 return 0;
5222 }
5223}
5224
5225static VALUE
5226vm_yield_with_cfunc(rb_execution_context_t *ec,
5227 const struct rb_captured_block *captured,
5228 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5229 const rb_callable_method_entry_t *me)
5230{
5231 int is_lambda = FALSE; /* TODO */
5232 VALUE val, arg, blockarg;
5233 int frame_flag;
5234 const struct vm_ifunc *ifunc = captured->code.ifunc;
5235
5236 if (is_lambda) {
5237 arg = rb_ary_new4(argc, argv);
5238 }
5239 else if (argc == 0) {
5240 arg = Qnil;
5241 }
5242 else {
5243 arg = argv[0];
5244 }
5245
5246 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5247
5248 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5249 if (kw_splat) {
5250 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5251 }
5252
5253 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5254 frame_flag,
5255 self,
5256 VM_GUARDED_PREV_EP(captured->ep),
5257 (VALUE)me,
5258 0, ec->cfp->sp, 0, 0);
5259 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5260 rb_vm_pop_frame(ec);
5261
5262 return val;
5263}
5264
5265VALUE
5266rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5267{
5268 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5269}
5270
5271static VALUE
5272vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5273{
5274 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5275}
5276
5277static inline int
5278vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5279{
5280 int i;
5281 long len = RARRAY_LEN(ary);
5282
5283 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5284
5285 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5286 argv[i] = RARRAY_AREF(ary, i);
5287 }
5288
5289 return i;
5290}
5291
5292static inline VALUE
5293vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5294{
5295 VALUE ary, arg0 = argv[0];
5296 ary = rb_check_array_type(arg0);
5297#if 0
5298 argv[0] = arg0;
5299#else
5300 VM_ASSERT(argv[0] == arg0);
5301#endif
5302 return ary;
5303}
5304
5305static int
5306vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5307{
5308 if (rb_simple_iseq_p(iseq)) {
5309 rb_control_frame_t *cfp = ec->cfp;
5310 VALUE arg0;
5311
5312 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5313
5314 if (arg_setup_type == arg_setup_block &&
5315 calling->argc == 1 &&
5316 ISEQ_BODY(iseq)->param.flags.has_lead &&
5317 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5318 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5319 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5320 }
5321
5322 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5323 if (arg_setup_type == arg_setup_block) {
5324 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5325 int i;
5326 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5327 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5328 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5329 }
5330 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5331 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5332 }
5333 }
5334 else {
5335 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5336 }
5337 }
5338
5339 return 0;
5340 }
5341 else {
5342 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5343 }
5344}
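
/* This is the source of ordinary block-argument semantics: for a simple iseq,
 * a lone Array argument is auto-splatted across leading parameters (e.g.
 * `[[1, 2]].each { |a, b| }` binds a = 1, b = 2), missing arguments are
 * padded with nil and extras truncated -- but only under arg_setup_block;
 * lambdas and bmethods take the strict arg_setup_method path and raise
 * ArgumentError on arity mismatch. */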
5345
5346static int
5347vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5348{
5349 struct rb_calling_info calling_entry, *calling;
5350
5351 calling = &calling_entry;
5352 calling->argc = argc;
5353 calling->block_handler = block_handler;
5354 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5355 calling->recv = Qundef;
5356 calling->heap_argv = 0;
5357 calling->cc = NULL;
5358 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5359
5360 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5361}
5362
5363/* ruby iseq -> ruby block */
5364
5365static VALUE
5366vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5367 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5368 bool is_lambda, VALUE block_handler)
5369{
5370 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5371 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5372 const int arg_size = ISEQ_BODY(iseq)->param.size;
5373 VALUE * const rsp = GET_SP() - calling->argc;
5374 VALUE * const argv = rsp;
5375 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5376 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5377
5378 SET_SP(rsp);
5379
5380 vm_push_frame(ec, iseq,
5381 frame_flag,
5382 captured->self,
5383 VM_GUARDED_PREV_EP(captured->ep), 0,
5384 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5385 rsp + arg_size,
5386 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5387
5388 return Qundef;
5389}
5390
5391static VALUE
5392vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5393 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5394 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5395{
5396 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5397 int flags = vm_ci_flag(ci);
5398
5399 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5400 ((calling->argc == 0) ||
5401 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5402 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5403 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5404 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5405 flags = 0;
5406 if (UNLIKELY(calling->heap_argv)) {
5407#if VM_ARGC_STACK_MAX < 0
5408 if (RARRAY_LEN(calling->heap_argv) < 1) {
5409 rb_raise(rb_eArgError, "no receiver given");
5410 }
5411#endif
5412 calling->recv = rb_ary_shift(calling->heap_argv);
5413 // Modify stack to avoid cfp consistency error
5414 reg_cfp->sp++;
5415 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5416 reg_cfp->sp[-2] = calling->recv;
5417 flags |= VM_CALL_ARGS_SPLAT;
5418 }
5419 else {
5420 if (calling->argc < 1) {
5421 rb_raise(rb_eArgError, "no receiver given");
5422 }
5423 calling->recv = TOPN(--calling->argc);
5424 }
5425 if (calling->kw_splat) {
5426 flags |= VM_CALL_KW_SPLAT;
5427 }
5428 }
5429 else {
5430 if (calling->argc < 1) {
5431 rb_raise(rb_eArgError, "no receiver given");
5432 }
5433 calling->recv = TOPN(--calling->argc);
5434 }
5435
5436 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5437}
5438
5439static VALUE
5440vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5441 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5442 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5443{
5444 VALUE val;
5445 int argc;
5446 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5447 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5448 argc = calling->argc;
5449 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5450 POPN(argc); /* TODO: should put before C/yield? */
5451 return val;
5452}
5453
5454static VALUE
5455vm_proc_to_block_handler(VALUE procval)
5456{
5457 const struct rb_block *block = vm_proc_block(procval);
5458
5459 switch (vm_block_type(block)) {
5460 case block_type_iseq:
5461 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5462 case block_type_ifunc:
5463 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5464 case block_type_symbol:
5465 return VM_BH_FROM_SYMBOL(block->as.symbol);
5466 case block_type_proc:
5467 return VM_BH_FROM_PROC(block->as.proc);
5468 }
5469 VM_UNREACHABLE(vm_yield_with_proc);
5470 return Qundef;
5471}
5472
5473static VALUE
5474vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5475 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5476 bool is_lambda, VALUE block_handler)
5477{
5478 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5479 VALUE proc = VM_BH_TO_PROC(block_handler);
5480 is_lambda = block_proc_is_lambda(proc);
5481 block_handler = vm_proc_to_block_handler(proc);
5482 }
5483
5484 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5485}
5486
5487static inline VALUE
5488vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5489 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5490 bool is_lambda, VALUE block_handler)
5491{
5492 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5493 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5494 bool is_lambda, VALUE block_handler);
5495
5496 switch (vm_block_handler_type(block_handler)) {
5497 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5498 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5499 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5500 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5501 default: rb_bug("vm_invoke_block: unreachable");
5502 }
5503
5504 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5505}
5506
5507static VALUE
5508vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5509{
5510 const rb_execution_context_t *ec = GET_EC();
5511 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5512 struct rb_captured_block *captured;
5513
5514 if (cfp == 0) {
5515 rb_bug("vm_make_proc_with_iseq: unreachable");
5516 }
5517
5518 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5519 captured->code.iseq = blockiseq;
5520
5521 return rb_vm_make_proc(ec, captured, rb_cProc);
5522}
5523
5524static VALUE
5525vm_once_exec(VALUE iseq)
5526{
5527 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5528 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5529}
5530
5531static VALUE
5532vm_once_clear(VALUE data)
5533{
5534 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5535 is->once.running_thread = NULL;
5536 return Qnil;
5537}
5538
5539/* defined insn */
5540
5541static bool
5542check_respond_to_missing(VALUE obj, VALUE v)
5543{
5544 VALUE args[2];
5545 VALUE r;
5546
5547 args[0] = obj; args[1] = Qfalse;
5548 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5549 if (!UNDEF_P(r) && RTEST(r)) {
5550 return true;
5551 }
5552 else {
5553 return false;
5554 }
5555}
5556
5557static bool
5558vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5559{
5560 VALUE klass;
5561 enum defined_type type = (enum defined_type)op_type;
5562
5563 switch (type) {
5564 case DEFINED_IVAR:
5565 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5566 break;
5567 case DEFINED_GVAR:
5568 return rb_gvar_defined(SYM2ID(obj));
5569 break;
5570 case DEFINED_CVAR: {
5571 const rb_cref_t *cref = vm_get_cref(GET_EP());
5572 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5573 return rb_cvar_defined(klass, SYM2ID(obj));
5574 break;
5575 }
5576 case DEFINED_CONST:
5577 case DEFINED_CONST_FROM: {
5578 bool allow_nil = type == DEFINED_CONST;
5579 klass = v;
5580 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5581 break;
5582 }
5583 case DEFINED_FUNC:
5584 klass = CLASS_OF(v);
5585 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5586 break;
5587 case DEFINED_METHOD:{
5588 VALUE klass = CLASS_OF(v);
5589 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5590
5591 if (me) {
5592 switch (METHOD_ENTRY_VISI(me)) {
5593 case METHOD_VISI_PRIVATE:
5594 break;
5595 case METHOD_VISI_PROTECTED:
5596 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5597 break;
5598 }
5599 case METHOD_VISI_PUBLIC:
5600 return true;
5601 break;
5602 default:
5603 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5604 }
5605 }
5606 else {
5607 return check_respond_to_missing(obj, v);
5608 }
5609 break;
5610 }
5611 case DEFINED_YIELD:
5612 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5613 return true;
5614 }
5615 break;
5616 case DEFINED_ZSUPER:
5617 {
5618 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5619
5620 if (me) {
5621 VALUE klass = vm_search_normal_superclass(me->defined_class);
5622 if (!klass) return false;
5623
5624 ID id = me->def->original_id;
5625
5626 return rb_method_boundp(klass, id, 0);
5627 }
5628 }
5629 break;
5630 case DEFINED_REF:
5631 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5632 default:
5633 rb_bug("unimplemented defined? type (VM)");
5634 break;
5635 }
5636
5637 return false;
5638}
5639
5640bool
5641rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5642{
5643 return vm_defined(ec, reg_cfp, op_type, obj, v);
5644}
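
/* [Editor's note — illustrative, not part of the upstream source]
 * Each defined_type handled above corresponds to a form of Ruby's
 * `defined?` expression, e.g.
 *
 *     defined?(@ivar)   # DEFINED_IVAR
 *     defined?($gvar)   # DEFINED_GVAR
 *     defined?(Const)   # DEFINED_CONST
 *     defined?(yield)   # DEFINED_YIELD (true when a block is given)
 *     defined?(super)   # DEFINED_ZSUPER
 *
 * vm_defined() only answers true/false; the `defined` insn supplies the
 * result string (e.g. "instance-variable") as its pushval operand.
 */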
5645
5646static const VALUE *
5647vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5648{
5649 rb_num_t i;
5650 const VALUE *ep = reg_ep;
5651 for (i = 0; i < lv; i++) {
5652 ep = GET_PREV_EP(ep);
5653 }
5654 return ep;
5655}
5656
5657static VALUE
5658vm_get_special_object(const VALUE *const reg_ep,
5659 enum vm_special_object_type type)
5660{
5661 switch (type) {
5662 case VM_SPECIAL_OBJECT_VMCORE:
5663 return rb_mRubyVMFrozenCore;
5664 case VM_SPECIAL_OBJECT_CBASE:
5665 return vm_get_cbase(reg_ep);
5666 case VM_SPECIAL_OBJECT_CONST_BASE:
5667 return vm_get_const_base(reg_ep);
5668 default:
5669 rb_bug("putspecialobject insn: unknown value_type %d", type);
5670 }
5671}
5672
5673// The ZJIT implementation uses this C function,
5674// so it needs a non-static entry point to call.
5675VALUE
5676rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5677{
5678 return vm_get_special_object(reg_ep, type);
5679}
5680
5681static VALUE
5682vm_concat_array(VALUE ary1, VALUE ary2st)
5683{
5684 const VALUE ary2 = ary2st;
5685 VALUE tmp1 = rb_check_to_array(ary1);
5686 VALUE tmp2 = rb_check_to_array(ary2);
5687
5688 if (NIL_P(tmp1)) {
5689 tmp1 = rb_ary_new3(1, ary1);
5690 }
5691 if (tmp1 == ary1) {
5692 tmp1 = rb_ary_dup(ary1);
5693 }
5694
5695 if (NIL_P(tmp2)) {
5696 return rb_ary_push(tmp1, ary2);
5697 }
5698 else {
5699 return rb_ary_concat(tmp1, tmp2);
5700 }
5701}
5702
5703static VALUE
5704vm_concat_to_array(VALUE ary1, VALUE ary2st)
5705{
5706 /* ary1 must be a newly created array */
5707 const VALUE ary2 = ary2st;
5708
5709 if (NIL_P(ary2)) return ary1;
5710
5711 VALUE tmp2 = rb_check_to_array(ary2);
5712
5713 if (NIL_P(tmp2)) {
5714 return rb_ary_push(ary1, ary2);
5715 }
5716 else {
5717 return rb_ary_concat(ary1, tmp2);
5718 }
5719}
5720
5721// The YJIT implementation uses this C function,
5722// so it needs a non-static entry point to call.
5723VALUE
5724rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5725{
5726 return vm_concat_array(ary1, ary2st);
5727}
5728
5729VALUE
5730rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5731{
5732 return vm_concat_to_array(ary1, ary2st);
5733}
5734
5735static VALUE
5736vm_splat_array(VALUE flag, VALUE ary)
5737{
5738 if (NIL_P(ary)) {
5739 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5740 }
5741 VALUE tmp = rb_check_to_array(ary);
5742 if (NIL_P(tmp)) {
5743 return rb_ary_new3(1, ary);
5744 }
5745 else if (RTEST(flag)) {
5746 return rb_ary_dup(tmp);
5747 }
5748 else {
5749 return tmp;
5750 }
5751}
5752
5753// The YJIT implementation uses this C function,
5754// so it needs a non-static entry point to call.
5755VALUE
5756rb_vm_splat_array(VALUE flag, VALUE ary)
5757{
5758 return vm_splat_array(flag, ary);
5759}
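
/* [Editor's note — illustrative, not part of the upstream source]
 * vm_splat_array() backs the splatarray insn; `flag` asks for a defensive
 * dup when the result may later be mutated:
 *
 *     a = *b     # splatarray true:  b.to_a is dup'ed before assignment
 *     foo(*b)    # splatarray false: the coerced array is only read
 */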
5760
5761static VALUE
5762vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5763{
5764 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5765
5766 if (flag & VM_CHECKMATCH_ARRAY) {
5767 long i;
5768 const long n = RARRAY_LEN(pattern);
5769
5770 for (i = 0; i < n; i++) {
5771 VALUE v = RARRAY_AREF(pattern, i);
5772 VALUE c = check_match(ec, v, target, type);
5773
5774 if (RTEST(c)) {
5775 return c;
5776 }
5777 }
5778 return Qfalse;
5779 }
5780 else {
5781 return check_match(ec, pattern, target, type);
5782 }
5783}
5784
5785VALUE
5786rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5787{
5788 return vm_check_match(ec, target, pattern, flag);
5789}
5790
5791static VALUE
5792vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5793{
5794 const VALUE kw_bits = *(ep - bits);
5795
5796 if (FIXNUM_P(kw_bits)) {
5797 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5798 if ((idx < VM_KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5799 return Qfalse;
5800 }
5801 else {
5802 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5803 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5804 }
5805 return Qtrue;
5806}
5807
5808static void
5809vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5810{
5811 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5812 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5813 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5814 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5815
5816 switch (flag) {
5817 case RUBY_EVENT_CALL:
5818 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5819 return;
5820 case RUBY_EVENT_C_CALL:
5821 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5822 return;
5823 case RUBY_EVENT_RETURN:
5824 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5825 return;
5826 case RUBY_EVENT_C_RETURN:
5827 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5828 return;
5829 }
5830 }
5831}
5832
5833static VALUE
5834vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5835{
5836 if (!rb_const_defined_at(cbase, id)) {
5837 return 0;
5838 }
5839 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5840 return rb_public_const_get_at(cbase, id);
5841 }
5842 else {
5843 return rb_const_get_at(cbase, id);
5844 }
5845}
5846
5847static VALUE
5848vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5849{
5850 if (!RB_TYPE_P(klass, T_CLASS)) {
5851 return 0;
5852 }
5853 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5854 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5855
5856 if (tmp != super) {
5857 rb_raise(rb_eTypeError,
5858 "superclass mismatch for class %"PRIsVALUE"",
5859 rb_id2str(id));
5860 }
5861 else {
5862 return klass;
5863 }
5864 }
5865 else {
5866 return klass;
5867 }
5868}
5869
5870static VALUE
5871vm_check_if_module(ID id, VALUE mod)
5872{
5873 if (!RB_TYPE_P(mod, T_MODULE)) {
5874 return 0;
5875 }
5876 else {
5877 return mod;
5878 }
5879}
5880
5881static VALUE
5882declare_under(ID id, VALUE cbase, VALUE c)
5883{
5884 rb_set_class_path_string(c, cbase, rb_id2str(id));
5885 rb_const_set(cbase, id, c);
5886 return c;
5887}
5888
5889static VALUE
5890vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5891{
5892 /* new class declaration */
5893 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5894 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5895 rb_define_alloc_func(c, rb_get_alloc_func(c));
5896 rb_class_inherited(s, c);
5897 return c;
5898}
5899
5900static VALUE
5901vm_declare_module(ID id, VALUE cbase)
5902{
5903 /* new module declaration */
5904 return declare_under(id, cbase, rb_module_new());
5905}
5906
5907NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5908static void
5909unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5910{
5911 VALUE name = rb_id2str(id);
5912 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5913 name, type);
5914 VALUE location = rb_const_source_location_at(cbase, id);
5915 if (!NIL_P(location)) {
5916 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5917 " previous definition of %"PRIsVALUE" was here",
5918 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5919 }
5920 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5921}
5922
5923static VALUE
5924vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5925{
5926 VALUE klass;
5927
5928 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5929 rb_raise(rb_eTypeError,
5930 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5931 rb_obj_class(super));
5932 }
5933
5934 vm_check_if_namespace(cbase);
5935
5936 /* find klass */
5937 rb_autoload_load(cbase, id);
5938
5939 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5940 if (!vm_check_if_class(id, flags, super, klass))
5941 unmatched_redefinition("class", cbase, id, klass);
5942 return klass;
5943 }
5944 else {
5945 return vm_declare_class(id, flags, cbase, super);
5946 }
5947}
5948
5949static VALUE
5950vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5951{
5952 VALUE mod;
5953
5954 vm_check_if_namespace(cbase);
5955 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5956 if (!vm_check_if_module(id, mod))
5957 unmatched_redefinition("module", cbase, id, mod);
5958 return mod;
5959 }
5960 else {
5961 return vm_declare_module(id, cbase);
5962 }
5963}
5964
5965static VALUE
5966vm_find_or_create_class_by_id(ID id,
5967 rb_num_t flags,
5968 VALUE cbase,
5969 VALUE super)
5970{
5971 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5972
5973 switch (type) {
5974 case VM_DEFINECLASS_TYPE_CLASS:
5975 /* classdef returns class scope value */
5976 return vm_define_class(id, flags, cbase, super);
5977
5978 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5979 /* classdef returns class scope value */
5980 return rb_singleton_class(cbase);
5981
5982 case VM_DEFINECLASS_TYPE_MODULE:
5983 /* classdef returns class scope value */
5984 return vm_define_module(id, flags, cbase);
5985
5986 default:
5987 rb_bug("unknown defineclass type: %d", (int)type);
5988 }
5989}
5990
5991static rb_method_visibility_t
5992vm_scope_visibility_get(const rb_execution_context_t *ec)
5993{
5994 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5995
5996 if (!vm_env_cref_by_cref(cfp->ep)) {
5997 return METHOD_VISI_PUBLIC;
5998 }
5999 else {
6000 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6001 }
6002}
6003
6004static int
6005vm_scope_module_func_check(const rb_execution_context_t *ec)
6006{
6007 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6008
6009 if (!vm_env_cref_by_cref(cfp->ep)) {
6010 return FALSE;
6011 }
6012 else {
6013 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6014 }
6015}
6016
6017static void
6018vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
6019{
6020 VALUE klass;
6021 rb_method_visibility_t visi;
6022 rb_cref_t *cref = vm_ec_cref(ec);
6023
6024 if (is_singleton) {
6025 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
6026 visi = METHOD_VISI_PUBLIC;
6027 }
6028 else {
6029 klass = CREF_CLASS_FOR_DEFINITION(cref);
6030 visi = vm_scope_visibility_get(ec);
6031 }
6032
6033 if (NIL_P(klass)) {
6034 rb_raise(rb_eTypeError, "no class/module to add method");
6035 }
6036
6037 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
6038 // Set max_iv_count on the class based on the number of ivar sets in its initialize method
6039 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
6040 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
6041 }
6042
6043 if (!is_singleton && vm_scope_module_func_check(ec)) {
6044 klass = rb_singleton_class(klass);
6045 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
6046 }
6047}
6048
6049// Return the untagged block handler:
6050// * If it's VM_BLOCK_HANDLER_NONE, return nil
6051// * If it's an ISEQ or an IFUNC, fetch it from its rb_captured_block
6052// * If it's a PROC or SYMBOL, return it as is
6053static VALUE
6054rb_vm_untag_block_handler(VALUE block_handler)
6055{
6056 if (VM_BLOCK_HANDLER_NONE == block_handler) return Qnil;
6057
6058 switch (vm_block_handler_type(block_handler)) {
6059 case block_handler_type_iseq:
6060 case block_handler_type_ifunc: {
6061 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
6062 return captured->code.val;
6063 }
6064 case block_handler_type_proc:
6065 case block_handler_type_symbol:
6066 return block_handler;
6067 default:
6068 rb_bug("rb_vm_untag_block_handler: unreachable");
6069 }
6070}
6071
6072VALUE
6073rb_vm_get_untagged_block_handler(rb_control_frame_t *reg_cfp)
6074{
6075 return rb_vm_untag_block_handler(VM_CF_BLOCK_HANDLER(reg_cfp));
6076}
6077
6078static VALUE
6079vm_invokeblock_i(struct rb_execution_context_struct *ec,
6080 struct rb_control_frame_struct *reg_cfp,
6081 struct rb_calling_info *calling)
6082{
6083 const struct rb_callinfo *ci = calling->cd->ci;
6084 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
6085
6086 if (block_handler == VM_BLOCK_HANDLER_NONE) {
6087 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
6088 }
6089 else {
6090 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
6091 }
6092}
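
/* [Editor's note — illustrative, not part of the upstream source]
 * The error branch above is the familiar failure of a bare `yield` in a
 * method invoked without a block:
 *
 *     def foo = yield
 *     foo   # raises LocalJumpError, "no block given (yield)"
 */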
6093
6094enum method_explorer_type {
6095 mexp_search_method,
6096 mexp_search_invokeblock,
6097 mexp_search_super,
6098};
6099
6100static inline VALUE
6101vm_sendish(
6102 struct rb_execution_context_struct *ec,
6103 struct rb_control_frame_struct *reg_cfp,
6104 struct rb_call_data *cd,
6105 VALUE block_handler,
6106 enum method_explorer_type method_explorer
6107) {
6108 VALUE val = Qundef;
6109 const struct rb_callinfo *ci = cd->ci;
6110 const struct rb_callcache *cc;
6111 int argc = vm_ci_argc(ci);
6112 VALUE recv = TOPN(argc);
6113 struct rb_calling_info calling = {
6114 .block_handler = block_handler,
6115 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6116 .recv = recv,
6117 .argc = argc,
6118 .cd = cd,
6119 };
6120
6121 switch (method_explorer) {
6122 case mexp_search_method:
6123 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6124 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6125 break;
6126 case mexp_search_super:
6127 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6128 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6129 break;
6130 case mexp_search_invokeblock:
6131 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6132 break;
6133 }
6134 return val;
6135}
6136
6137VALUE
6138rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6139{
6140 stack_check(ec);
6141 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6142 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6143 VM_EXEC(ec, val);
6144 return val;
6145}
6146
6147VALUE
6148rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6149{
6150 stack_check(ec);
6151
6152 struct rb_forwarding_call_data adjusted_cd;
6153 struct rb_callinfo adjusted_ci;
6154
6155 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6156
6157 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6158
6159 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6160 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6161 }
6162
6163 VM_EXEC(ec, val);
6164 return val;
6165}
6166
6167VALUE
6168rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6169{
6170 stack_check(ec);
6171 VALUE bh = VM_BLOCK_HANDLER_NONE;
6172 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6173 VM_EXEC(ec, val);
6174 return val;
6175}
6176
6177VALUE
6178rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6179{
6180 stack_check(ec);
6181
6182 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6183 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6184
6185 VM_EXEC(ec, val);
6186 return val;
6187}
6188
6189VALUE
6190rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6191{
6192 stack_check(ec);
6193 struct rb_forwarding_call_data adjusted_cd;
6194 struct rb_callinfo adjusted_ci;
6195
6196 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6197
6198 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6199
6200 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6201 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6202 }
6203
6204 VM_EXEC(ec, val);
6205 return val;
6206}
6207
6208VALUE
6209rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6210{
6211 stack_check(ec);
6212 VALUE bh = VM_BLOCK_HANDLER_NONE;
6213 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6214 VM_EXEC(ec, val);
6215 return val;
6216}
6217
6218/* object.c */
6219VALUE rb_nil_to_s(VALUE);
6220VALUE rb_true_to_s(VALUE);
6221VALUE rb_false_to_s(VALUE);
6222/* numeric.c */
6223VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6224VALUE rb_fix_to_s(VALUE);
6225/* variable.c */
6226VALUE rb_mod_to_s(VALUE);
6227VALUE rb_mod_name(VALUE);
6228
6229static VALUE
6230vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6231{
6232 int type = TYPE(recv);
6233 if (type == T_STRING) {
6234 return recv;
6235 }
6236
6237 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
6238
6239 switch (type) {
6240 case T_SYMBOL:
6241 if (check_method_basic_definition(cme)) {
6242 // rb_sym_to_s() allocates a mutable string, but since we are only
6243 // going to use this string for interpolation, it's fine to use the
6244 // frozen string.
6245 return rb_sym2str(recv);
6246 }
6247 break;
6248 case T_MODULE:
6249 case T_CLASS:
6250 if (check_cfunc(cme, rb_mod_to_s)) {
6251 // rb_mod_to_s() allocates a mutable string, but since we are only
6252 // going to use this string for interpolation, it's fine to use the
6253 // frozen string.
6254 VALUE val = rb_mod_name(recv);
6255 if (NIL_P(val)) {
6256 val = rb_mod_to_s(recv);
6257 }
6258 return val;
6259 }
6260 break;
6261 case T_NIL:
6262 if (check_cfunc(cme, rb_nil_to_s)) {
6263 return rb_nil_to_s(recv);
6264 }
6265 break;
6266 case T_TRUE:
6267 if (check_cfunc(cme, rb_true_to_s)) {
6268 return rb_true_to_s(recv);
6269 }
6270 break;
6271 case T_FALSE:
6272 if (check_cfunc(cme, rb_false_to_s)) {
6273 return rb_false_to_s(recv);
6274 }
6275 break;
6276 case T_FIXNUM:
6277 if (check_cfunc(cme, rb_int_to_s)) {
6278 return rb_fix_to_s(recv);
6279 }
6280 break;
6281 }
6282 return Qundef;
6283}
6284
6285// The ZJIT implementation uses this C function,
6286// so it needs a non-static entry point to call.
6287VALUE
6288rb_vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6289{
6290 return vm_objtostring(iseq, recv, cd);
6291}
6292
6293static VALUE
6294vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6295{
6296 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6297 return ary;
6298 }
6299 else {
6300 return Qundef;
6301 }
6302}
6303
6304static VALUE
6305vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6306{
6307 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6308 return hash;
6309 }
6310 else {
6311 return Qundef;
6312 }
6313}
6314
6315static VALUE
6316vm_opt_str_freeze(VALUE str, int bop, ID id)
6317{
6318 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6319 return str;
6320 }
6321 else {
6322 return Qundef;
6323 }
6324}
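
/* [Editor's note — illustrative, not part of the upstream source]
 * The three helpers above back the opt_str_freeze / opt_ary_freeze /
 * opt_hash_freeze instructions: for literal receivers such as
 *
 *     "foo".freeze
 *     [1, 2].freeze
 *
 * the insn returns a prebuilt frozen object without dispatching #freeze,
 * provided the corresponding #freeze is unredefined; Qundef makes the
 * insn fall back to a regular method call.
 */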
6325
6326/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6327#define id_cmp idCmp
6328
6329static VALUE
6330vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6331{
6332 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6333 return rb_ary_includes(ary, target);
6334 }
6335 else {
6336 VALUE args[1] = {target};
6337
6338 // duparray
6339 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6340 VALUE dupary = rb_ary_resurrect(ary);
6341
6342 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6343 }
6344}
6345
6346VALUE
6347rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6348{
6349 return vm_opt_duparray_include_p(ec, ary, target);
6350}
6351
6352static VALUE
6353vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6354{
6355 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6356 if (array_len == 0) {
6357 return Qnil;
6358 }
6359 else {
6360 VALUE result = *ptr;
6361 rb_snum_t i = array_len - 1;
6362 while (i-- > 0) {
6363 const VALUE v = *++ptr;
6364 if (OPTIMIZED_CMP(v, result) > 0) {
6365 result = v;
6366 }
6367 }
6368 return result;
6369 }
6370 }
6371 else {
6372 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6373 }
6374}
6375
6376VALUE
6377rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6378{
6379 return vm_opt_newarray_max(ec, array_len, ptr);
6380}
6381
6382static VALUE
6383vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6384{
6385 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6386 if (array_len == 0) {
6387 return Qnil;
6388 }
6389 else {
6390 VALUE result = *ptr;
6391 rb_snum_t i = array_len - 1;
6392 while (i-- > 0) {
6393 const VALUE v = *++ptr;
6394 if (OPTIMIZED_CMP(v, result) < 0) {
6395 result = v;
6396 }
6397 }
6398 return result;
6399 }
6400 }
6401 else {
6402 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6403 }
6404}
6405
6406VALUE
6407rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6408{
6409 return vm_opt_newarray_min(ec, array_len, ptr);
6410}
6411
6412static VALUE
6413vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6414{
6415 // If Array#hash is _not_ monkeypatched, use the optimized call
6416 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6417 return rb_ary_hash_values(array_len, ptr);
6418 }
6419 else {
6420 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6421 }
6422}
6423
6424VALUE
6425rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6426{
6427 return vm_opt_newarray_hash(ec, array_len, ptr);
6428}
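
/* [Editor's note — illustrative, not part of the upstream source]
 * vm_opt_newarray_{max,min,hash} back the opt_newarray_send instruction:
 * expressions like
 *
 *     [a, b, c].max
 *     [x, y].hash
 *
 * are computed directly from the operands still on the VM stack, skipping
 * the intermediate Array allocation unless the Array method in question
 * has been redefined.
 */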
6429
6430VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6431VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6432
6433static VALUE
6434vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
6435{
6436 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6437 struct RArray fake_ary = {RBASIC_INIT};
6438 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
6439 return rb_ary_includes(ary, target);
6440 }
6441 else {
6442 VALUE args[1] = {target};
6443 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6444 }
6445}
6446
6447VALUE
6448rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
6449{
6450 return vm_opt_newarray_include_p(ec, array_len, ptr, target);
6451}
6452
6453static VALUE
6454vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
6455{
6456 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6457 struct RArray fake_ary = {RBASIC_INIT};
6458 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
6459 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6460 }
6461 else {
6462 // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
6463 // Set up an argv array with room for the keyword hash.
6464 VALUE args[2];
6465 args[0] = fmt;
6466 int kw_splat = RB_NO_KEYWORDS;
6467 int argc = 1;
6468
6469 if (!UNDEF_P(buffer)) {
6470 args[1] = rb_hash_new_with_size(1);
6471 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6472 kw_splat = RB_PASS_KEYWORDS;
6473 argc++;
6474 }
6475
6476 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idPack, argc, args, kw_splat);
6477 }
6478}
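
/* [Editor's note — illustrative, not part of the upstream source]
 * The buffer variant corresponds to calls such as
 *
 *     [1, 2, 3].pack("C*", buffer: buf)
 *
 * On the fast path the elements are packed through a stack-local fake
 * array; on the fallback path the `buffer:` keyword hash that
 * opt_newarray_send dropped is rebuilt before dispatching to Array#pack.
 */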
6479
6480VALUE
6481rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
6482{
6483 return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, buffer);
6484}
6485
6486VALUE
6487rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt)
6488{
6489 return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, Qundef);
6490}
6491
6492#undef id_cmp
6493
6494static void
6495vm_track_constant_cache(ID id, void *ic)
6496{
6497 rb_vm_t *vm = GET_VM();
6498 struct rb_id_table *const_cache = vm->constant_cache;
6499 VALUE lookup_result;
6500 set_table *ics;
6501
6502 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6503 ics = (set_table *)lookup_result;
6504 }
6505 else {
6506 ics = set_init_numtable();
6507 rb_id_table_insert(const_cache, id, (VALUE)ics);
6508 }
6509
6510 /* The call below to set_insert could allocate, which could trigger a GC.
6511 * If it triggers a GC, it may free an iseq that also holds a cache to this
6512 * constant. If that iseq is the last iseq with a cache to this constant, then
6513 * it will free this set table, which would cause a use-after-free during this
6514 * set_insert.
6515 *
6516 * To fix this issue, we store the ID that is currently being inserted
6517 * and, in remove_from_constant_cache, we don't free the set table for an ID
6518 * equal to this one.
6519 *
6520 * See [Bug #20921].
6521 */
6522 vm->inserting_constant_cache_id = id;
6523
6524 set_insert(ics, (st_data_t)ic);
6525
6526 vm->inserting_constant_cache_id = (ID)0;
6527}
6528
6529static void
6530vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6531{
6532 RB_VM_LOCKING() {
6533 for (int i = 0; segments[i]; i++) {
6534 ID id = segments[i];
6535 if (id == idNULL) continue;
6536 vm_track_constant_cache(id, ic);
6537 }
6538 }
6539}
6540
6541// For JIT inlining
6542static inline bool
6543vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6544{
6545 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6546 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6547
6548 return (ic_cref == NULL || // no need to check CREF
6549 ic_cref == vm_get_cref(reg_ep));
6550 }
6551 return false;
6552}
6553
6554static bool
6555vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6556{
6557 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6558 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6559}
6560
6561// YJIT needs this function to never allocate and never raise
6562bool
6563rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6564{
6565 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6566}
6567
6568static void
6569vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6570{
6571 if (ruby_vm_const_missing_count > 0) {
6572 ruby_vm_const_missing_count = 0;
6573 ic->entry = NULL;
6574 return;
6575 }
6576
6577 struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6578 RB_OBJ_WRITE(ice, &ice->value, val);
6579 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6580
6581 if (rb_ractor_shareable_p(val)) {
6582 RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
6583 ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6584 }
6585 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6586 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6587 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6588 rb_yjit_constant_ic_update(iseq, ic, pos);
6589}
6590
6591VALUE
6592rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6593{
6594 VALUE val;
6595 const ID *segments = ic->segments;
6596 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6597
6598 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6599 val = ice->value;
6600
6601 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6602 }
6603 else {
6604 ruby_vm_constant_cache_misses++;
6605 val = vm_get_ev_const_chain(ec, segments);
6606 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6607 // Undo the PC increment to get the address to this instruction
6608 // INSN_ATTR(width) == 2
6609 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6610 }
6611 return val;
6612}
6613
6614static VALUE
6615vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6616{
6617 rb_thread_t *th = rb_ec_thread_ptr(ec);
6618 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6619
6620 again:
6621 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6622 return is->once.value;
6623 }
6624 else if (is->once.running_thread == NULL) {
6625 VALUE val;
6626 is->once.running_thread = th;
6627 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6628 // TODO: confirm that it is shareable
6629
6630 if (RB_FL_ABLE(val)) {
6631 RB_OBJ_SET_SHAREABLE(val);
6632 }
6633
6634 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6635
6636 /* is->once.running_thread is cleared by vm_once_clear() */
6637 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6638 return val;
6639 }
6640 else if (is->once.running_thread == th) {
6641 /* recursive once */
6642 return vm_once_exec((VALUE)iseq);
6643 }
6644 else {
6645 /* waiting for finish */
6646 RUBY_VM_CHECK_INTS(ec);
6647 rb_thread_schedule();
6648 goto again;
6649 }
6650}
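
/* [Editor's note — illustrative, not part of the upstream source]
 * vm_once_dispatch() backs the `once` instruction, which runs its body at
 * most one time per ISeq and memoizes the result, e.g. a once-regexp:
 *
 *     def pattern(x) = /#{x}/o   # interpolation runs only on the first call
 *
 * Later callers, and other threads that race the first evaluation, wait in
 * the RUBY_VM_CHECK_INTS()/rb_thread_schedule() loop until the value is
 * published.
 */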
6651
6652static OFFSET
6653vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6654{
6655 switch (OBJ_BUILTIN_TYPE(key)) {
6656 case -1:
6657 case T_FLOAT:
6658 case T_SYMBOL:
6659 case T_BIGNUM:
6660 case T_STRING:
6661 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6662 SYMBOL_REDEFINED_OP_FLAG |
6663 INTEGER_REDEFINED_OP_FLAG |
6664 FLOAT_REDEFINED_OP_FLAG |
6665 NIL_REDEFINED_OP_FLAG |
6666 TRUE_REDEFINED_OP_FLAG |
6667 FALSE_REDEFINED_OP_FLAG |
6668 STRING_REDEFINED_OP_FLAG)) {
6669 st_data_t val;
6670 if (RB_FLOAT_TYPE_P(key)) {
6671 double kval = RFLOAT_VALUE(key);
6672 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6673 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6674 }
6675 }
6676 if (rb_hash_stlike_lookup(hash, key, &val)) {
6677 return FIX2LONG((VALUE)val);
6678 }
6679 else {
6680 return else_offset;
6681 }
6682 }
6683 }
6684 return 0;
6685}
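
/* [Editor's note — illustrative, not part of the upstream source]
 * vm_case_dispatch() serves `case` statements whose `when` clauses are all
 * literals by looking the key up in a precomputed CDHASH. Integral Floats
 * are folded to Integer keys first, so
 *
 *     case 1.0
 *     when 1 then :hit    # taken: 1.0 is normalized to the key 1
 *     end
 *
 * behaves like Integer#===. A return value of 0 means "no fast dispatch";
 * the insn then falls back to sequential === comparisons.
 */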
6686
6687NORETURN(static void
6688 vm_stack_consistency_error(const rb_execution_context_t *ec,
6689 const rb_control_frame_t *,
6690 const VALUE *));
6691static void
6692vm_stack_consistency_error(const rb_execution_context_t *ec,
6693 const rb_control_frame_t *cfp,
6694 const VALUE *bp)
6695{
6696 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6697 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6698 static const char stack_consistency_error[] =
6699 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6700#if defined RUBY_DEVEL
6701 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6702 rb_str_cat_cstr(mesg, "\n");
6703 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6704 rb_bug("%s", StringValueCStr(mesg));
6705#else
6706 rb_bug(stack_consistency_error, nsp, nbp);
6707#endif
6708}
6709
6710static VALUE
6711vm_opt_plus(VALUE recv, VALUE obj)
6712{
6713 if (FIXNUM_2_P(recv, obj) &&
6714 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6715 return rb_fix_plus_fix(recv, obj);
6716 }
6717 else if (FLONUM_2_P(recv, obj) &&
6718 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6719 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6720 }
6721 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6722 return Qundef;
6723 }
6724 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6725 RBASIC_CLASS(obj) == rb_cFloat &&
6726 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6727 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6728 }
6729 else if (RBASIC_CLASS(recv) == rb_cString &&
6730 RBASIC_CLASS(obj) == rb_cString &&
6731 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6732 return rb_str_opt_plus(recv, obj);
6733 }
6734 else if (RBASIC_CLASS(recv) == rb_cArray &&
6735 RBASIC_CLASS(obj) == rb_cArray &&
6736 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6737 return rb_ary_plus(recv, obj);
6738 }
6739 else {
6740 return Qundef;
6741 }
6742}
6743
6744static VALUE
6745vm_opt_minus(VALUE recv, VALUE obj)
6746{
6747 if (FIXNUM_2_P(recv, obj) &&
6748 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6749 return rb_fix_minus_fix(recv, obj);
6750 }
6751 else if (FLONUM_2_P(recv, obj) &&
6752 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6753 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6754 }
6755 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6756 return Qundef;
6757 }
6758 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6759 RBASIC_CLASS(obj) == rb_cFloat &&
6760 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6761 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6762 }
6763 else {
6764 return Qundef;
6765 }
6766}
6767
6768static VALUE
6769vm_opt_mult(VALUE recv, VALUE obj)
6770{
6771 if (FIXNUM_2_P(recv, obj) &&
6772 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6773 return rb_fix_mul_fix(recv, obj);
6774 }
6775 else if (FLONUM_2_P(recv, obj) &&
6776 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6777 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6778 }
6779 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6780 return Qundef;
6781 }
6782 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6783 RBASIC_CLASS(obj) == rb_cFloat &&
6784 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6785 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6786 }
6787 else {
6788 return Qundef;
6789 }
6790}
6791
6792static VALUE
6793vm_opt_div(VALUE recv, VALUE obj)
6794{
6795 if (FIXNUM_2_P(recv, obj) &&
6796 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6797 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6798 }
6799 else if (FLONUM_2_P(recv, obj) &&
6800 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6801 return rb_flo_div_flo(recv, obj);
6802 }
6803 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6804 return Qundef;
6805 }
6806 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6807 RBASIC_CLASS(obj) == rb_cFloat &&
6808 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6809 return rb_flo_div_flo(recv, obj);
6810 }
6811 else {
6812 return Qundef;
6813 }
6814}
6815
6816static VALUE
6817vm_opt_mod(VALUE recv, VALUE obj)
6818{
6819 if (FIXNUM_2_P(recv, obj) &&
6820 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6821 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6822 }
6823 else if (FLONUM_2_P(recv, obj) &&
6824 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6825 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6826 }
6827 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6828 return Qundef;
6829 }
6830 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6831 RBASIC_CLASS(obj) == rb_cFloat &&
6832 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6833 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6834 }
6835 else {
6836 return Qundef;
6837 }
6838}
6839
6840static VALUE
6841vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6842{
6843 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6844 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6845
6846 if (!UNDEF_P(val)) {
6847 return RBOOL(!RTEST(val));
6848 }
6849 }
6850
6851 return Qundef;
6852}
6853
6854static VALUE
6855vm_opt_lt(VALUE recv, VALUE obj)
6856{
6857 if (FIXNUM_2_P(recv, obj) &&
6858 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6859 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6860 }
6861 else if (FLONUM_2_P(recv, obj) &&
6862 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6863 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6864 }
6865 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6866 return Qundef;
6867 }
6868 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6869 RBASIC_CLASS(obj) == rb_cFloat &&
6870 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6871 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6872 }
6873 else {
6874 return Qundef;
6875 }
6876}
6877
6878static VALUE
6879vm_opt_le(VALUE recv, VALUE obj)
6880{
6881 if (FIXNUM_2_P(recv, obj) &&
6882 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6883 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6884 }
6885 else if (FLONUM_2_P(recv, obj) &&
6886 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6887 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6888 }
6889 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6890 return Qundef;
6891 }
6892 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6893 RBASIC_CLASS(obj) == rb_cFloat &&
6894 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6895 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6896 }
6897 else {
6898 return Qundef;
6899 }
6900}
6901
6902static VALUE
6903vm_opt_gt(VALUE recv, VALUE obj)
6904{
6905 if (FIXNUM_2_P(recv, obj) &&
6906 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6907 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6908 }
6909 else if (FLONUM_2_P(recv, obj) &&
6910 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6911 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6912 }
6913 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6914 return Qundef;
6915 }
6916 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6917 RBASIC_CLASS(obj) == rb_cFloat &&
6918 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6919 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6920 }
6921 else {
6922 return Qundef;
6923 }
6924}
6925
6926static VALUE
6927vm_opt_ge(VALUE recv, VALUE obj)
6928{
6929 if (FIXNUM_2_P(recv, obj) &&
6930 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6931 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6932 }
6933 else if (FLONUM_2_P(recv, obj) &&
6934 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6935 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6936 }
6937 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6938 return Qundef;
6939 }
6940 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6941 RBASIC_CLASS(obj) == rb_cFloat &&
6942 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6943 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6944 }
6945 else {
6946 return Qundef;
6947 }
6948}
6949
6950
6951static VALUE
6952vm_opt_ltlt(VALUE recv, VALUE obj)
6953{
6954 if (SPECIAL_CONST_P(recv)) {
6955 return Qundef;
6956 }
6957 else if (RBASIC_CLASS(recv) == rb_cString &&
6958 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6959 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6960 return rb_str_buf_append(recv, obj);
6961 }
6962 else {
6963 return rb_str_concat(recv, obj);
6964 }
6965 }
6966 else if (RBASIC_CLASS(recv) == rb_cArray &&
6967 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6968 return rb_ary_push(recv, obj);
6969 }
6970 else {
6971 return Qundef;
6972 }
6973}
6974
6975static VALUE
6976vm_opt_and(VALUE recv, VALUE obj)
6977{
6978 // If recv and obj are both fixnums, then the bottom tag bit
6979 // will be 1 on both. 1 & 1 == 1, so the result value will also
6980 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6981 // will be 0, and we return Qundef.
6982 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6983
6984 if (FIXNUM_P(ret) &&
6985 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6986 return ret;
6987 }
6988 else {
6989 return Qundef;
6990 }
6991}
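
/* [Editor's note — a worked instance of the tag trick above, assuming the
 * usual Fixnum encoding VALUE = (n << 1) | 1]
 *
 *     6 -> 0b1101 (13),  3 -> 0b0111 (7)
 *     13 & 7 == 0b0101 (5) == (2 << 1) | 1 -> 2,  and indeed 6 & 3 == 2
 *
 * The low tag bit survives the AND only when both operands are Fixnums,
 * so the FIXNUM_P(ret) test doubles as the type check for both operands.
 */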
6992
6993static VALUE
6994vm_opt_or(VALUE recv, VALUE obj)
6995{
6996 if (FIXNUM_2_P(recv, obj) &&
6997 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6998 return recv | obj;
6999 }
7000 else {
7001 return Qundef;
7002 }
7003}
7004
7005static VALUE
7006vm_opt_aref(VALUE recv, VALUE obj)
7007{
7008 if (SPECIAL_CONST_P(recv)) {
7009 if (FIXNUM_2_P(recv, obj) &&
7010 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
7011 return rb_fix_aref(recv, obj);
7012 }
7013 return Qundef;
7014 }
7015 else if (RBASIC_CLASS(recv) == rb_cArray &&
7016 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
7017 if (FIXNUM_P(obj)) {
7018 return rb_ary_entry_internal(recv, FIX2LONG(obj));
7019 }
7020 else {
7021 return rb_ary_aref1(recv, obj);
7022 }
7023 }
7024 else if (RBASIC_CLASS(recv) == rb_cHash &&
7025 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
7026 return rb_hash_aref(recv, obj);
7027 }
7028 else {
7029 return Qundef;
7030 }
7031}
7032
7033static VALUE
7034vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
7035{
7036 if (SPECIAL_CONST_P(recv)) {
7037 return Qundef;
7038 }
7039 else if (RBASIC_CLASS(recv) == rb_cArray &&
7040 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
7041 FIXNUM_P(obj)) {
7042 rb_ary_store(recv, FIX2LONG(obj), set);
7043 return set;
7044 }
7045 else if (RBASIC_CLASS(recv) == rb_cHash &&
7046 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
7047 rb_hash_aset(recv, obj, set);
7048 return set;
7049 }
7050 else {
7051 return Qundef;
7052 }
7053}
7054
7055static VALUE
7056vm_opt_length(VALUE recv, int bop)
7057{
7058 if (SPECIAL_CONST_P(recv)) {
7059 return Qundef;
7060 }
7061 else if (RBASIC_CLASS(recv) == rb_cString &&
7062 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7063 if (bop == BOP_EMPTY_P) {
7064 return LONG2NUM(RSTRING_LEN(recv));
7065 }
7066 else {
7067 return rb_str_length(recv);
7068 }
7069 }
7070 else if (RBASIC_CLASS(recv) == rb_cArray &&
7071 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7072 return LONG2NUM(RARRAY_LEN(recv));
7073 }
7074 else if (RBASIC_CLASS(recv) == rb_cHash &&
7075 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7076 return INT2FIX(RHASH_SIZE(recv));
7077 }
7078 else {
7079 return Qundef;
7080 }
7081}
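
/* [Editor's note — not part of the upstream source]
 * For BOP_EMPTY_P the String branch above returns the byte length
 * (RSTRING_LEN) instead of the character count from rb_str_length(): a
 * string is empty exactly when it has zero bytes, so the cheaper value is
 * enough for vm_opt_empty_p() below, which only compares against 0.
 */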
7082
7083static VALUE
7084vm_opt_empty_p(VALUE recv)
7085{
7086 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7087 case Qundef: return Qundef;
7088 case INT2FIX(0): return Qtrue;
7089 default: return Qfalse;
7090 }
7091}
7092
7093VALUE rb_false(VALUE obj);
7094
7095static VALUE
7096vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7097{
7098 if (NIL_P(recv) &&
7099 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7100 return Qtrue;
7101 }
7102 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
7103 return Qfalse;
7104 }
7105 else {
7106 return Qundef;
7107 }
7108}
7109
7110static VALUE
7111fix_succ(VALUE x)
7112{
7113 switch (x) {
7114 case ~0UL:
7115 /* 0xFFFF_FFFF == INT2FIX(-1)
7116 * `-1.succ` is of course 0. */
7117 return INT2FIX(0);
7118 case RSHIFT(~0UL, 1):
7119 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7120 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7121 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7122 default:
7123 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7124 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7125 * == lx*2 + ly*2 + 1
7126 * == (lx*2+1) + (ly*2+1) - 1
7127 * == x + y - 1
7128 *
7129 * Here, if we put y := INT2FIX(1):
7130 *
7131 * == x + INT2FIX(1) - 1
7132 * == x + 2 .
7133 */
7134 return x + 2;
7135 }
7136}
7137
7138static VALUE
7139vm_opt_succ(VALUE recv)
7140{
7141 if (FIXNUM_P(recv) &&
7142 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7143 return fix_succ(recv);
7144 }
7145 else if (SPECIAL_CONST_P(recv)) {
7146 return Qundef;
7147 }
7148 else if (RBASIC_CLASS(recv) == rb_cString &&
7149 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7150 return rb_str_succ(recv);
7151 }
7152 else {
7153 return Qundef;
7154 }
7155}
7156
7157static VALUE
7158vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7159{
7160 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7161 return RBOOL(!RTEST(recv));
7162 }
7163 else {
7164 return Qundef;
7165 }
7166}
7167
7168static VALUE
7169vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7170{
7171 if (SPECIAL_CONST_P(recv)) {
7172 return Qundef;
7173 }
7174 else if (RBASIC_CLASS(recv) == rb_cString &&
7175 CLASS_OF(obj) == rb_cRegexp &&
7176 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7177 return rb_reg_match(obj, recv);
7178 }
7179 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7180 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7181 return rb_reg_match(recv, obj);
7182 }
7183 else {
7184 return Qundef;
7185 }
7186}
7187
7188rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7189
7190NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7191
7192static inline void
7193vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7194 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7195 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7196{
7197 rb_event_flag_t event = pc_events & target_event;
7198 VALUE self = GET_SELF();
7199
7200 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7201
7202 if (event & global_hooks->events) {
7203 /* increment PC because source line is calculated with PC-1 */
7204 reg_cfp->pc++;
7205 vm_dtrace(event, ec);
7206 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7207 reg_cfp->pc--;
7208 }
7209
7210 // Load here since global hook above can add and free local hooks
7211 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7212 if (local_hooks != NULL) {
7213 if (event & local_hooks->events) {
7214 /* increment PC because source line is calculated with PC-1 */
7215 reg_cfp->pc++;
7216 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7217 reg_cfp->pc--;
7218 }
7219 }
7220}
7221
7222#define VM_TRACE_HOOK(target_event, val) do { \
7223 if ((pc_events & (target_event)) & enabled_flags) { \
7224 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7225 } \
7226} while (0)
7227
7228static VALUE
7229rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7230{
7231 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7232 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7233 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7234}
7235
7236static void
7237vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7238{
7239 const VALUE *pc = reg_cfp->pc;
7240 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7241 rb_event_flag_t global_events = enabled_flags;
7242
7243 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7244 return;
7245 }
7246 else {
7247 const rb_iseq_t *iseq = reg_cfp->iseq;
7248 VALUE iseq_val = (VALUE)iseq;
7249 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7250 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7251 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7252 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7253 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7254 rb_hook_list_t *bmethod_local_hooks = NULL;
7255 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7256 rb_event_flag_t bmethod_local_events = 0;
7257 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7258 enabled_flags |= iseq_local_events;
7259
7260 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7261
7262 if (bmethod_frame) {
7263 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7264 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7265 bmethod_local_hooks = me->def->body.bmethod.hooks;
7266 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7267 if (bmethod_local_hooks) {
7268 bmethod_local_events = bmethod_local_hooks->events;
7269 }
7270 }
7271
7272
7273 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7274#if 0
7275 /* disable trace */
7276 /* TODO: incomplete */
7277 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7278#else
7279 /* do not disable trace because of performance problem
7280 * (re-enable overhead)
7281 */
7282#endif
7283 return;
7284 }
7285 else if (ec->trace_arg != NULL) {
7286 /* already tracing */
7287 return;
7288 }
7289 else {
7290 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7291 /* Note, not considering iseq local events here since the same
7292 * iseq could be used in multiple bmethods. */
7293 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7294
7295 if (0) {
7296 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7297 (int)pos,
7298 (int)pc_events,
7299 RSTRING_PTR(rb_iseq_path(iseq)),
7300 (int)rb_iseq_line_no(iseq, pos),
7301 RSTRING_PTR(rb_iseq_label(iseq)));
7302 }
7303 VM_ASSERT(reg_cfp->pc == pc);
7304 VM_ASSERT(pc_events != 0);
7305
7306 /* check traces */
7307 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7308 /* b_call instruction running as a method. Fire call event. */
7309 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7310 }
7311 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7312 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7313 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7314 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7315 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7316 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7317 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7318 /* b_return instruction running as a method. Fire return event. */
7319 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7320 }
7321
7322 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7323 // We need the pointer to stay valid in case compaction happens in a trace hook.
7324 //
7325 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7326 // storage for `rb_method_definition_t` is not on the GC heap.
7327 RB_GC_GUARD(iseq_val);
7328 }
7329 }
7330}
7331#undef VM_TRACE_HOOK
7332
7333#if VM_CHECK_MODE > 0
7334NORETURN( NOINLINE( COLDFUNC
7335void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7336
7337void
7338Init_vm_stack_canary(void)
7339{
7340 /* This has to be called _after_ our PRNG is properly set up. */
7341 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7342 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7343
7344 vm_stack_canary_was_born = true;
7345 VM_ASSERT(n == 0);
7346}
7347
7348void
7349rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7350{
7351 /* Because a method has already been called, why not call
7352 * another one. */
7353 const char *insn = rb_insns_name(i);
7354 VALUE inspection = rb_inspect(c);
7355 const char *str = StringValueCStr(inspection);
7356
7357 rb_bug("dead canary found at %s: %s", insn, str);
7358}
7359
7360#else
7361void Init_vm_stack_canary(void) { /* nothing to do */ }
7362#endif
7363
7364
7365/* a part of the following code is generated by this ruby script:
7366
736716.times{|i|
7368 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7369 typedef_args.prepend(", ") if i != 0
7370 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7371 call_args.prepend(", ") if i != 0
7372 puts %Q{
7373static VALUE
7374builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7375{
7376 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7377 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7378}}
7379}
7380
7381puts
7382puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
738316.times{|i|
7384 puts " builtin_invoker#{i},"
7385}
7386puts "};"
7387*/
7388
7389static VALUE
7390builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7391{
7392 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7393 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7394}
7395
7396static VALUE
7397builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7398{
7399 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7400 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7401}
7402
7403static VALUE
7404builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7405{
7406 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7407 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7408}
7409
7410static VALUE
7411builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7412{
7413 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7414 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7415}
7416
7417static VALUE
7418builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7419{
7420 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7421 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7422}
7423
7424static VALUE
7425builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7426{
7427 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7428 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7429}
7430
7431static VALUE
7432builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7433{
7434 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7435 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7436}
7437
7438static VALUE
7439builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7440{
7441 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7442 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7443}
7444
7445static VALUE
7446builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7447{
7448 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7449 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7450}
7451
7452static VALUE
7453builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7454{
7455 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7456 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7457}
7458
7459static VALUE
7460builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7461{
7462 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7463 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7464}
7465
7466static VALUE
7467builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7468{
7469 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7470 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7471}
7472
7473static VALUE
7474builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7475{
7476 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7477 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7478}
7479
7480static VALUE
7481builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7482{
7483 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7484 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7485}
7486
7487static VALUE
7488builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7489{
7490 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7491 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7492}
7493
7494static VALUE
7495builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7496{
7497 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7498 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7499}
7500
7501typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7502
7503static builtin_invoker
7504lookup_builtin_invoker(int argc)
7505{
7506 static const builtin_invoker invokers[] = {
7507 builtin_invoker0,
7508 builtin_invoker1,
7509 builtin_invoker2,
7510 builtin_invoker3,
7511 builtin_invoker4,
7512 builtin_invoker5,
7513 builtin_invoker6,
7514 builtin_invoker7,
7515 builtin_invoker8,
7516 builtin_invoker9,
7517 builtin_invoker10,
7518 builtin_invoker11,
7519 builtin_invoker12,
7520 builtin_invoker13,
7521 builtin_invoker14,
7522 builtin_invoker15,
7523 };
7524
7525 return invokers[argc];
7526}
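
/* Illustrative use of the table above (hypothetical two-argument builtin):
 *
 *   VALUE argv[2] = { arg0, arg1 };
 *   builtin_invoker invoker = lookup_builtin_invoker(2);  // builtin_invoker2
 *   VALUE ret = invoker(ec, self, argv, (rb_insn_func_t)some_builtin_func);
 *
 * argc must be in 0..15: invokers[] has exactly 16 entries and this hot
 * path performs no bounds check.
 */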
7527
7528static inline VALUE
7529invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7530{
7531 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7532 SETUP_CANARY(canary_p);
7533 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7534 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7535 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7536 return ret;
7537}
7538
7539static VALUE
7540vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7541{
7542 return invoke_bf(ec, cfp, bf, argv);
7543}
7544
7545static VALUE
7546vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7547{
7548 if (0) { // debug print
7549 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7550 for (int i=0; i<bf->argc; i++) {
7551 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7552 }
7553 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7554 (void *)(uintptr_t)bf->func_ptr);
7555 }
7556
7557 if (bf->argc == 0) {
7558 return invoke_bf(ec, cfp, bf, NULL);
7559 }
7560 else {
7561 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7562 return invoke_bf(ec, cfp, bf, argv);
7563 }
7564}
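
/* Worked example of the argv computation above, assuming the usual
 * VM_ENV_DATA_SIZE of 3: with local_table_size == 2 and start_index == 0,
 *
 *   argv = ep - 2 - 3 + 1 + 0 = ep - 4
 *
 * so argv points at the frame's first local (ep[-4] and ep[-3] hold the two
 * locals; ep[-2..0] hold the env data: me/cref, specval, flags), and the
 * builtin receives the method's locals as its arguments.
 */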
7565
7566// for __builtin_inline!()
7567
7568VALUE
7569rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7570{
7571 const rb_control_frame_t *cfp = ec->cfp;
7572 return cfp->ep[index];
7573}
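
/* Sketch of a call site (hypothetical index): C code spliced in through
 * __builtin_inline!() reads the surrounding Ruby locals with this helper,
 * passing each local's slot offset relative to cfp->ep -- negative for
 * locals, given the frame layout in the worked example above:
 *
 *   VALUE x = rb_vm_lvar_exposed(ec, -4);  // first local in that example
 */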
7574