Ruby 3.5.0dev (2025-10-09 revision a29c90c3b0bdc355b8b6795488db3aeba2996575)
vm_insnhelper.c (a29c90c3b0bdc355b8b6795488db3aeba2996575)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
50static VALUE
51ruby_vm_special_exception_copy(VALUE exc)
52{
53    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
54    rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
58NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
59static void
60ec_stack_overflow(rb_execution_context_t *ec, int setup)
61{
62 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
63 ec->raised_flag = RAISED_STACKOVERFLOW;
64 if (setup) {
65 VALUE at = rb_ec_backtrace_object(ec);
66 mesg = ruby_vm_special_exception_copy(mesg);
67 rb_ivar_set(mesg, idBt, at);
68 rb_ivar_set(mesg, idBt_locations, at);
69 }
70 ec->errinfo = mesg;
71 EC_JUMP_TAG(ec, TAG_RAISE);
72}
73
74NORETURN(static void vm_stackoverflow(void));
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
82void
83rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
84{
85 if (rb_during_gc()) {
86 rb_bug("system stack overflow during GC. Faulty native extension?");
87 }
88 if (crit >= rb_stack_overflow_fatal) {
89 ec->raised_flag = RAISED_STACKOVERFLOW;
90 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91 EC_JUMP_TAG(ec, TAG_RAISE);
92 }
93 ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
94}
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
99static int
100callable_class_p(VALUE klass)
101{
102#if VM_CHECK_MODE >= 2
103 if (!klass) return FALSE;
104 switch (RB_BUILTIN_TYPE(klass)) {
105 default:
106 break;
107 case T_ICLASS:
108 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
109 case T_MODULE:
110 return TRUE;
111 }
112 while (klass) {
113 if (klass == rb_cBasicObject) {
114 return TRUE;
115 }
116 klass = RCLASS_SUPER(klass);
117 }
118 return FALSE;
119#else
120 return klass != 0;
121#endif
122}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
142static void
143vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
144{
145 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
146 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
147
148 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
149 cref_or_me_type = imemo_type(cref_or_me);
150 }
151 if (type & VM_FRAME_FLAG_BMETHOD) {
152 req_me = TRUE;
153 }
154
155 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
156 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
157 }
158 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
159 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
160 }
161
162 if (req_me) {
163 if (cref_or_me_type != imemo_ment) {
164 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
165 }
166 }
167 else {
168 if (req_cref && cref_or_me_type != imemo_cref) {
169 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
170 }
171 else { /* cref or Qfalse */
172 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
173 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
174 /* ignore */
175 }
176 else {
177 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
178 }
179 }
180 }
181 }
182
183 if (cref_or_me_type == imemo_ment) {
184 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
185
186 if (!callable_method_entry_p(me)) {
187 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
188 }
189 }
190
191 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
192 VM_ASSERT(iseq == NULL ||
193 RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
194 RUBY_VM_NORMAL_ISEQ_P(iseq) //argument error
195 );
196 }
197 else {
198 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
199 }
200}
201
202static void
203vm_check_frame(VALUE type,
204 VALUE specval,
205 VALUE cref_or_me,
206 const rb_iseq_t *iseq)
207{
208 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
209 VM_ASSERT(FIXNUM_P(type));
210
211#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
212 case magic: \
213 vm_check_frame_detail(type, req_block, req_me, req_cref, \
214 specval, cref_or_me, is_cframe, iseq); \
215 break
216 switch (given_magic) {
217 /* BLK ME CREF CFRAME */
218 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
219 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
220 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
221 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
222 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
223 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
224 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
225 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
226 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
227 default:
228 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
229 }
230#undef CHECK
231}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
252
253void
254rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
255{
256 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
257 const struct rb_iseq_struct *iseq;
258
259 if (! LIKELY(vm_stack_canary_was_born)) {
260 return; /* :FIXME: isn't it rather fatal to enter this branch? */
261 }
262 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
263 /* This is at the very beginning of a thread. cfp does not exist. */
264 return;
265 }
266 else if (! (iseq = GET_ISEQ())) {
267 return;
268 }
269 else if (LIKELY(sp[0] != vm_stack_canary)) {
270 return;
271 }
272 else {
273 /* we are going to call methods below; squash the canary to
274 * prevent infinite loop. */
275 sp[0] = Qundef;
276 }
277
278 const VALUE *orig = rb_iseq_original_iseq(iseq);
279 const VALUE iseqw = rb_iseqw_new(iseq);
280 const VALUE inspection = rb_inspect(iseqw);
281 const char *stri = rb_str_to_cstr(inspection);
282 const VALUE disasm = rb_iseq_disasm(iseq);
283 const char *strd = rb_str_to_cstr(disasm);
284 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
285 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
286 const char *name = insn_name(insn);
287
288    /* rb_bug() is not capable of outputting contents this large. It
289       is designed to run from a SIGSEGV handler, which tends to be
290       very restricted. */
291 ruby_debug_printf(
292 "We are killing the stack canary set by %s, "
293 "at %s@pc=%"PRIdPTR"\n"
294        "watch out for the C stack trace.\n"
295 "%s",
296 name, stri, pos, strd);
297 rb_bug("see above.");
298}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
305
306#if USE_DEBUG_COUNTER
307static void
308vm_push_frame_debug_counter_inc(
309 const struct rb_execution_context_struct *ec,
310 const struct rb_control_frame_struct *reg_cfp,
311 VALUE type)
312{
313 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
314
315 RB_DEBUG_COUNTER_INC(frame_push);
316
317 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
318 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
319 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
320 if (prev) {
321 if (curr) {
322 RB_DEBUG_COUNTER_INC(frame_R2R);
323 }
324 else {
325 RB_DEBUG_COUNTER_INC(frame_R2C);
326 }
327 }
328 else {
329 if (curr) {
330 RB_DEBUG_COUNTER_INC(frame_C2R);
331 }
332 else {
333 RB_DEBUG_COUNTER_INC(frame_C2C);
334 }
335 }
336 }
337
338 switch (type & VM_FRAME_MAGIC_MASK) {
339 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
340 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
341 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
342 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
343 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
344 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
345 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
346 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
347 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
348 }
349
350 rb_bug("unreachable");
351}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
356// Return a poison value to be set above the stack top to verify leafness.
357VALUE
358rb_vm_stack_canary(void)
359{
360#if VM_CHECK_MODE > 0
361 return vm_stack_canary;
362#else
363 return 0;
364#endif
365}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
371static void
372vm_push_frame(rb_execution_context_t *ec,
373 const rb_iseq_t *iseq,
374 VALUE type,
375 VALUE self,
376 VALUE specval,
377 VALUE cref_or_me,
378 const VALUE *pc,
379 VALUE *sp,
380 int local_size,
381 int stack_max)
382{
383 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
384
385 vm_check_frame(type, specval, cref_or_me, iseq);
386 VM_ASSERT(local_size >= 0);
387
388 /* check stack overflow */
389 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
390 vm_check_canary(ec, sp);
391
392 /* setup vm value stack */
393
394 /* initialize local variables */
395 for (int i=0; i < local_size; i++) {
396 *sp++ = Qnil;
397 }
398
399 /* setup ep with managing data */
400 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
401 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
402 *sp++ = type; /* ep[-0] / ENV_FLAGS */
403
404 /* setup new frame */
405 *cfp = (const struct rb_control_frame_struct) {
406 .pc = pc,
407 .sp = sp,
408 .iseq = iseq,
409 .self = self,
410 .ep = sp - 1,
411 .block_code = NULL,
412#if VM_DEBUG_BP_CHECK
413 .bp_check = sp,
414#endif
415 .jit_return = NULL,
416 };
417
418 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
419 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
420 future/untested compilers/platforms. */
421
422 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
423 atomic_signal_fence(memory_order_seq_cst);
424 #endif
425
426 ec->cfp = cfp;
427
428 if (VMDEBUG == 2) {
429 SDR();
430 }
431 vm_push_frame_debug_counter_inc(ec, cfp, type);
432}
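/* A sketch of the frame this leaves behind (informal, derived from the
 * code above): the value stack grows upward, locals are initialized to
 * Qnil, and cfp->ep points at the flags slot, so the three env-data
 * slots are addressed relative to it:
 *
 *     lowest  : local 0 .. local (local_size-1)  ... Qnil
 *               cref_or_me                       ... ep[-2]
 *               specval                          ... ep[-1]
 *               type / env flags                 ... ep[ 0]  <- cfp->ep
 *     highest : (cfp->sp starts here)
 */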
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
444/* return TRUE if the frame is finished */
445static inline int
446vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
447{
448 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
449
450 if (VMDEBUG == 2) SDR();
451
452 RUBY_VM_CHECK_INTS(ec);
453 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
454
455 return flags & VM_FRAME_FLAG_FINISH;
456}
457
458void
459rb_vm_pop_frame(rb_execution_context_t *ec)
460{
461 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
462}
463
464// Pushes a pseudo (dummy) frame whose iseq reports fname as its file name.
465VALUE
466rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
467{
468 rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
469 rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);
470
471 vm_push_frame(ec,
472 dmy_iseq, //const rb_iseq_t *iseq,
473 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
474 ec->cfp->self, // VALUE self,
475 VM_BLOCK_HANDLER_NONE, // VALUE specval,
476 Qfalse, // VALUE cref_or_me,
477 NULL, // const VALUE *pc,
478 ec->cfp->sp, // VALUE *sp,
479 0, // int local_size,
480 0); // int stack_max
481
482 return (VALUE)dmy_iseq;
483}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
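/* Messages this builds, for example:
 *   rb_arity_error_new(1, 0, 0)                   -> "wrong number of arguments (given 1, expected 0)"
 *   rb_arity_error_new(1, 2, 3)                   -> "wrong number of arguments (given 1, expected 2..3)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS) -> "wrong number of arguments (given 0, expected 1+)"
 */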
502
503void
504rb_error_arity(int argc, int min, int max)
505{
506 rb_exc_raise(rb_arity_error_new(argc, min, max));
507}
508
509/* lvar */
510
511NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
512
513static void
514vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
515{
516    /* remember env value forcibly */
517 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
518 VM_FORCE_WRITE(&ep[index], v);
519 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
520 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
521}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
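/* Informal note: envs that have escaped to the GC heap carry
 * VM_ENV_FLAG_WB_REQUIRED, so a write into them takes the slow path
 * above, which marks the env object as "remembered" for the
 * generational GC and then clears the flag; subsequent writes can take
 * the direct fast path again. Stack-local envs never set the flag. */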
535
536void
537rb_vm_env_write(const VALUE *ep, int index, VALUE v)
538{
539 vm_env_write(ep, index, v);
540}
541
542VALUE
543rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
544{
545 if (block_handler == VM_BLOCK_HANDLER_NONE) {
546 return Qnil;
547 }
548 else {
549 switch (vm_block_handler_type(block_handler)) {
550 case block_handler_type_iseq:
551 case block_handler_type_ifunc:
552 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
553 case block_handler_type_symbol:
554 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
555 case block_handler_type_proc:
556 return VM_BH_TO_PROC(block_handler);
557 default:
558 VM_UNREACHABLE(rb_vm_bh_to_procval);
559 }
560 }
561}
562
563/* svar */
564
565#if VM_CHECK_MODE > 0
566static int
567vm_svar_valid_p(VALUE svar)
568{
569 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
570 switch (imemo_type(svar)) {
571 case imemo_svar:
572 case imemo_cref:
573 case imemo_ment:
574 return TRUE;
575 default:
576 break;
577 }
578 }
579 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
580 return FALSE;
581}
582#endif
583
584static inline struct vm_svar *
585lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
586{
587 VALUE svar;
588
589 if (lep && (ec == NULL || ec->root_lep != lep)) {
590 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
591 }
592 else {
593 svar = ec->root_svar;
594 }
595
596 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
597
598 return (struct vm_svar *)svar;
599}
600
601static inline void
602lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
603{
604 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
605
606 if (lep && (ec == NULL || ec->root_lep != lep)) {
607 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
608 }
609 else {
610 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
611 }
612}
613
614static VALUE
615lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
616{
617 const struct vm_svar *svar = lep_svar(ec, lep);
618
619 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
620
621 switch (key) {
622 case VM_SVAR_LASTLINE:
623 return svar->lastline;
624 case VM_SVAR_BACKREF:
625 return svar->backref;
626 default: {
627 const VALUE ary = svar->others;
628
629 if (NIL_P(ary)) {
630 return Qnil;
631 }
632 else {
633 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
634 }
635 }
636 }
637}
638
639static struct vm_svar *
640svar_new(VALUE obj)
641{
642 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
643 *((VALUE *)&svar->lastline) = Qnil;
644 *((VALUE *)&svar->backref) = Qnil;
645 *((VALUE *)&svar->others) = Qnil;
646
647 return svar;
648}
649
650static void
651lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
652{
653 struct vm_svar *svar = lep_svar(ec, lep);
654
655 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
656 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
657 }
658
659 switch (key) {
660 case VM_SVAR_LASTLINE:
661 RB_OBJ_WRITE(svar, &svar->lastline, val);
662 return;
663 case VM_SVAR_BACKREF:
664 RB_OBJ_WRITE(svar, &svar->backref, val);
665 return;
666 default: {
667 VALUE ary = svar->others;
668
669 if (NIL_P(ary)) {
670 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
671 }
672 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
673 }
674 }
675}
676
677static inline VALUE
678vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
679{
680 VALUE val;
681
682 if (type == 0) {
683 val = lep_svar_get(ec, lep, key);
684 }
685 else {
686 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
687
688 if (type & 0x01) {
689 switch (type >> 1) {
690 case '&':
691 val = rb_reg_last_match(backref);
692 break;
693 case '`':
694 val = rb_reg_match_pre(backref);
695 break;
696 case '\'':
697 val = rb_reg_match_post(backref);
698 break;
699 case '+':
700 val = rb_reg_match_last(backref);
701 break;
702 default:
703 rb_bug("unexpected back-ref");
704 }
705 }
706 else {
707 val = rb_reg_nth_match((int)(type >> 1), backref);
708 }
709 }
710 return val;
711}
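/* The `type' operand packs which special variable is wanted: with the
 * low bit set, type >> 1 is a character naming a $~-derived global
 * ('&' for $&, '`' for $`, '\'' for $', '+' for $+); with the low bit
 * clear, type >> 1 is a capture-group number, so e.g. $1 is fetched as
 * rb_reg_nth_match(1, backref). type == 0 reads the svar slot itself. */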
712
713static inline VALUE
714vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
715{
716 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
717 int nth = 0;
718
719 if (type & 0x01) {
720 switch (type >> 1) {
721 case '&':
722 case '`':
723 case '\'':
724 break;
725 case '+':
726 return rb_reg_last_defined(backref);
727 default:
728 rb_bug("unexpected back-ref");
729 }
730 }
731 else {
732 nth = (int)(type >> 1);
733 }
734 return rb_reg_nth_defined(nth, backref);
735}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
738static rb_callable_method_entry_t *
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758        rb_bug("check_method_entry: svar should not be there");
759#endif
760 return NULL;
761 }
762}
763
764static rb_callable_method_entry_t *
765env_method_entry_unchecked(VALUE obj, int can_be_svar)
766{
767 if (obj == Qfalse) return NULL;
768
769 switch (imemo_type(obj)) {
770 case imemo_ment:
771 return (rb_callable_method_entry_t *)obj;
772 case imemo_cref:
773 return NULL;
774 case imemo_svar:
775 if (can_be_svar) {
776 return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
777 }
778 default:
779 return NULL;
780 }
781}
782
783const rb_callable_method_entry_t *
784rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
785{
786 const VALUE *ep = cfp->ep;
787    const rb_callable_method_entry_t *me;
788
789 while (!VM_ENV_LOCAL_P(ep)) {
790 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
791 ep = VM_ENV_PREV_EP(ep);
792 }
793
794 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
795}
796
797const rb_callable_method_entry_t *
798rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
799{
800 const VALUE *ep = cfp->ep;
801    const rb_callable_method_entry_t *me;
802
803 while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
804 if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
805 ep = VM_ENV_PREV_EP_UNCHECKED(ep);
806 }
807
808 return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
809}
810
811static const rb_iseq_t *
812method_entry_iseqptr(const rb_callable_method_entry_t *me)
813{
814 switch (me->def->type) {
815 case VM_METHOD_TYPE_ISEQ:
816 return me->def->body.iseq.iseqptr;
817 default:
818 return NULL;
819 }
820}
821
822static rb_cref_t *
823method_entry_cref(const rb_callable_method_entry_t *me)
824{
825 switch (me->def->type) {
826 case VM_METHOD_TYPE_ISEQ:
827 return me->def->body.iseq.cref;
828 default:
829 return NULL;
830 }
831}
832
833#if VM_CHECK_MODE == 0
834PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
835#endif
836static rb_cref_t *
837check_cref(VALUE obj, int can_be_svar)
838{
839 if (obj == Qfalse) return NULL;
840
841#if VM_CHECK_MODE > 0
842 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
843#endif
844
845 switch (imemo_type(obj)) {
846 case imemo_ment:
847 return method_entry_cref((rb_callable_method_entry_t *)obj);
848 case imemo_cref:
849 return (rb_cref_t *)obj;
850 case imemo_svar:
851 if (can_be_svar) {
852 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
853 }
854 default:
855#if VM_CHECK_MODE > 0
856        rb_bug("check_cref: svar should not be there");
857#endif
858 return NULL;
859 }
860}
861
862static inline rb_cref_t *
863vm_env_cref(const VALUE *ep)
864{
865 rb_cref_t *cref;
866
867 while (!VM_ENV_LOCAL_P(ep)) {
868 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
869 ep = VM_ENV_PREV_EP(ep);
870 }
871
872 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
873}
874
875static int
876is_cref(const VALUE v, int can_be_svar)
877{
878 if (RB_TYPE_P(v, T_IMEMO)) {
879 switch (imemo_type(v)) {
880 case imemo_cref:
881 return TRUE;
882 case imemo_svar:
883 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
884 default:
885 break;
886 }
887 }
888 return FALSE;
889}
890
891static int
892vm_env_cref_by_cref(const VALUE *ep)
893{
894 while (!VM_ENV_LOCAL_P(ep)) {
895 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
896 ep = VM_ENV_PREV_EP(ep);
897 }
898 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
899}
900
901static rb_cref_t *
902cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
903{
904 const VALUE v = *vptr;
905 rb_cref_t *cref, *new_cref;
906
907 if (RB_TYPE_P(v, T_IMEMO)) {
908 switch (imemo_type(v)) {
909 case imemo_cref:
910 cref = (rb_cref_t *)v;
911 new_cref = vm_cref_dup(cref);
912 if (parent) {
913 RB_OBJ_WRITE(parent, vptr, new_cref);
914 }
915 else {
916 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
917 }
918 return (rb_cref_t *)new_cref;
919 case imemo_svar:
920 if (can_be_svar) {
921 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
922 }
923 /* fall through */
924 case imemo_ment:
925 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
926 default:
927 break;
928 }
929 }
930 return NULL;
931}
932
933static rb_cref_t *
934vm_cref_replace_with_duplicated_cref(const VALUE *ep)
935{
936 if (vm_env_cref_by_cref(ep)) {
937 rb_cref_t *cref;
938 VALUE envval;
939
940 while (!VM_ENV_LOCAL_P(ep)) {
941 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
942 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
943 return cref;
944 }
945 ep = VM_ENV_PREV_EP(ep);
946 }
947 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
948 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
949 }
950 else {
951 rb_bug("vm_cref_dup: unreachable");
952 }
953}
954
955static rb_cref_t *
956vm_get_cref(const VALUE *ep)
957{
958 rb_cref_t *cref = vm_env_cref(ep);
959
960 if (cref != NULL) {
961 return cref;
962 }
963 else {
964 rb_bug("vm_get_cref: unreachable");
965 }
966}
967
968rb_cref_t *
969rb_vm_get_cref(const VALUE *ep)
970{
971 return vm_get_cref(ep);
972}
973
974static rb_cref_t *
975vm_ec_cref(const rb_execution_context_t *ec)
976{
977 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
978
979 if (cfp == NULL) {
980 return NULL;
981 }
982 return vm_get_cref(cfp->ep);
983}
984
985static const rb_cref_t *
986vm_get_const_key_cref(const VALUE *ep)
987{
988 const rb_cref_t *cref = vm_get_cref(ep);
989 const rb_cref_t *key_cref = cref;
990
991 while (cref) {
992 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
993 RCLASS_CLONED_P(CREF_CLASS(cref)) ) {
994 return key_cref;
995 }
996 cref = CREF_NEXT(cref);
997 }
998
999 /* does not include singleton class */
1000 return NULL;
1001}
1002
1003rb_cref_t *
1004rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
1005{
1006 rb_cref_t *new_cref_head = NULL;
1007 rb_cref_t *new_cref_tail = NULL;
1008
1009 #define ADD_NEW_CREF(new_cref) \
1010 if (new_cref_tail) { \
1011 RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
1012 } \
1013 else { \
1014 new_cref_head = new_cref; \
1015 } \
1016 new_cref_tail = new_cref;
1017
1018 while (cref) {
1019 rb_cref_t *new_cref;
1020 if (CREF_CLASS(cref) == old_klass) {
1021 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
1022 ADD_NEW_CREF(new_cref);
1023 return new_cref_head;
1024 }
1025 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
1026 cref = CREF_NEXT(cref);
1027 ADD_NEW_CREF(new_cref);
1028 }
1029
1030 #undef ADD_NEW_CREF
1031
1032 // Could we just reuse the original cref?
1033 return new_cref_head;
1034}
1035
1036static rb_cref_t *
1037vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1038{
1039 rb_cref_t *prev_cref = NULL;
1040
1041 if (ep) {
1042 prev_cref = vm_env_cref(ep);
1043 }
1044 else {
1045 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1046
1047 if (cfp) {
1048 prev_cref = vm_env_cref(cfp->ep);
1049 }
1050 }
1051
1052 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1053}
1054
1055static inline VALUE
1056vm_get_cbase(const VALUE *ep)
1057{
1058 const rb_cref_t *cref = vm_get_cref(ep);
1059
1060 return CREF_CLASS_FOR_DEFINITION(cref);
1061}
1062
1063static inline VALUE
1064vm_get_const_base(const VALUE *ep)
1065{
1066 const rb_cref_t *cref = vm_get_cref(ep);
1067
1068 while (cref) {
1069 if (!CREF_PUSHED_BY_EVAL(cref)) {
1070 return CREF_CLASS_FOR_DEFINITION(cref);
1071 }
1072 cref = CREF_NEXT(cref);
1073 }
1074
1075 return Qundef;
1076}
1077
1078static inline void
1079vm_check_if_namespace(VALUE klass)
1080{
1081 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1082 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1083 }
1084}
1085
1086static inline void
1087vm_ensure_not_refinement_module(VALUE self)
1088{
1089 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1090 rb_warn("not defined at the refinement, but at the outer class/module");
1091 }
1092}
1093
1094static inline VALUE
1095vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1096{
1097 return klass;
1098}
1099
1100static inline VALUE
1101vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1102{
1103 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1104 VALUE val;
1105
1106 if (NIL_P(orig_klass) && allow_nil) {
1107 /* in current lexical scope */
1108 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1109 const rb_cref_t *cref;
1110 VALUE klass = Qnil;
1111
1112 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1113 root_cref = CREF_NEXT(root_cref);
1114 }
1115 cref = root_cref;
1116 while (cref && CREF_NEXT(cref)) {
1117 if (CREF_PUSHED_BY_EVAL(cref)) {
1118 klass = Qnil;
1119 }
1120 else {
1121 klass = CREF_CLASS(cref);
1122 }
1123 cref = CREF_NEXT(cref);
1124
1125 if (!NIL_P(klass)) {
1126 VALUE av, am = 0;
1127 rb_const_entry_t *ce;
1128 search_continue:
1129 if ((ce = rb_const_lookup(klass, id))) {
1130 rb_const_warn_if_deprecated(ce, klass, id);
1131 val = ce->value;
1132 if (UNDEF_P(val)) {
1133 if (am == klass) break;
1134 am = klass;
1135 if (is_defined) return 1;
1136 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1137 rb_autoload_load(klass, id);
1138 goto search_continue;
1139 }
1140 else {
1141 if (is_defined) {
1142 return 1;
1143 }
1144 else {
1145 if (UNLIKELY(!rb_ractor_main_p())) {
1146 if (!rb_ractor_shareable_p(val)) {
1147 rb_raise(rb_eRactorIsolationError,
1148 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1149 }
1150 }
1151 return val;
1152 }
1153 }
1154 }
1155 }
1156 }
1157
1158 /* search self */
1159 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1160 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1161 }
1162 else {
1163 klass = CLASS_OF(ec->cfp->self);
1164 }
1165
1166 if (is_defined) {
1167 return rb_const_defined(klass, id);
1168 }
1169 else {
1170 return rb_const_get(klass, id);
1171 }
1172 }
1173 else {
1174 vm_check_if_namespace(orig_klass);
1175 if (is_defined) {
1176 return rb_public_const_defined_from(orig_klass, id);
1177 }
1178 else {
1179 return rb_public_const_get_from(orig_klass, id);
1180 }
1181 }
1182}
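/* Roughly, the two branches correspond to these Ruby-level forms:
 *
 *   module M
 *     X = 1
 *     def self.f
 *       X        # orig_klass nil + allow_nil: lexical (cref) search,
 *     end        # then constant lookup from the cref's class
 *   end
 *
 *   M::X         # orig_klass == M: rb_public_const_get_from(M, :X)
 *
 * The rb_ractor_main_p() check above means a non-main Ractor may only
 * read constants whose values are shareable. */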
1183
1184VALUE
1185rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1186{
1187 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1188}
1189
1190static inline VALUE
1191vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1192{
1193 VALUE val = Qnil;
1194 int idx = 0;
1195 int allow_nil = TRUE;
1196 if (segments[0] == idNULL) {
1197 val = rb_cObject;
1198 idx++;
1199 allow_nil = FALSE;
1200 }
1201 while (segments[idx]) {
1202 ID id = segments[idx++];
1203 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1204 allow_nil = FALSE;
1205 }
1206 return val;
1207}
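/* `segments' is a zero-terminated ID array for a constant path. As a
 * sketch, `A::B::C' walks {:A, :B, :C} starting with a lexical lookup,
 * while `::A::B' starts with idNULL, so the walk is rooted at
 * rb_cObject and the lexical (allow_nil) lookup is disabled from the
 * first segment on. */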
1208
1209
1210static inline VALUE
1211vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1212{
1213 VALUE klass;
1214
1215 if (!cref) {
1216 rb_bug("vm_get_cvar_base: no cref");
1217 }
1218
1219 while (CREF_NEXT(cref) &&
1220 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1221 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1222 cref = CREF_NEXT(cref);
1223 }
1224 if (top_level_raise && !CREF_NEXT(cref)) {
1225 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1226 }
1227
1228 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1229
1230 if (NIL_P(klass)) {
1231 rb_raise(rb_eTypeError, "no class variables available");
1232 }
1233 return klass;
1234}
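/* In effect: walk the cref past singleton classes and eval-pushed
 * scopes so that, e.g., @@x inside `def self.f' resolves against the
 * class itself rather than its singleton class, while @@x at the true
 * toplevel raises the RuntimeError above. */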
1235
1236ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1237static inline void
1238fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1239{
1240 if (is_attr) {
1241 vm_cc_attr_index_set(cc, index, shape_id);
1242 }
1243 else {
1244 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1245 }
1246}
1247
1248#define ractor_incidental_shareable_p(cond, val) \
1249 (!(cond) || rb_ractor_shareable_p(val))
1250#define ractor_object_incidental_shareable_p(obj, val) \
1251 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1252
1253ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1254static inline VALUE
1255vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1256{
1257 VALUE fields_obj;
1258#if OPT_IC_FOR_IVAR
1259 if (SPECIAL_CONST_P(obj)) {
1260 return default_value;
1261 }
1262
1263 switch (BUILTIN_TYPE(obj)) {
1264 case T_OBJECT:
1265 fields_obj = obj;
1266 break;
1267 case T_CLASS:
1268 case T_MODULE:
1269 {
1270 if (UNLIKELY(!rb_ractor_main_p())) {
1271 // For two reasons we can only use the fast path on the main
1272 // ractor.
1273 // First, only the main ractor is allowed to set ivars on classes
1274 // and modules. So we can skip locking.
1275 // Second, other ractors need to check the shareability of the
1276 // values returned from the class ivars.
1277
1278 if (default_value == Qundef) { // defined?
1279 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1280 }
1281 else {
1282 goto general_path;
1283 }
1284 }
1285
1286 fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
1287 break;
1288 }
1289 default:
1290 fields_obj = rb_obj_fields(obj, id);
1291 }
1292
1293 if (!fields_obj) {
1294 return default_value;
1295 }
1296
1297 VALUE val = Qundef;
1298
1299 shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
1300 VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);
1301
1302 shape_id_t cached_id;
1303 attr_index_t index;
1304
1305 if (is_attr) {
1306 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1307 }
1308 else {
1309 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1310 }
1311
1312 if (LIKELY(cached_id == shape_id)) {
1313 RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));
1314
1315 if (index == ATTR_INDEX_NOT_SET) {
1316 return default_value;
1317 }
1318
1319 val = ivar_list[index];
1320#if USE_DEBUG_COUNTER
1321 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1322
1323 if (RB_TYPE_P(obj, T_OBJECT)) {
1324 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1325 }
1326#endif
1327 RUBY_ASSERT(!UNDEF_P(val));
1328 }
1329 else { // cache miss case
1330#if USE_DEBUG_COUNTER
1331 if (is_attr) {
1332 if (cached_id != INVALID_SHAPE_ID) {
1333 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1334 }
1335 else {
1336 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1337 }
1338 }
1339 else {
1340 if (cached_id != INVALID_SHAPE_ID) {
1341 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1342 }
1343 else {
1344 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1345 }
1346 }
1347 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1348
1349 if (RB_TYPE_P(obj, T_OBJECT)) {
1350 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1351 }
1352#endif
1353
1354 if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
1355 st_table *table = (st_table *)ivar_list;
1356
1357 RUBY_ASSERT(table);
1358 RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));
1359
1360 if (!st_lookup(table, id, &val)) {
1361 val = default_value;
1362 }
1363 }
1364 else {
1365 shape_id_t previous_cached_id = cached_id;
1366 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1367                // rb_shape_get_iv_index_with_hint() may have advanced
1368                // cached_id past the stale hint; refill the inline cache.
1369 if (cached_id != previous_cached_id) {
1370 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1371 }
1372
1373 if (index == ATTR_INDEX_NOT_SET) {
1374 val = default_value;
1375 }
1376 else {
1377 // We fetched the ivar list above
1378 val = ivar_list[index];
1379 RUBY_ASSERT(!UNDEF_P(val));
1380 }
1381 }
1382 else {
1383 if (is_attr) {
1384 vm_cc_attr_index_initialize(cc, shape_id);
1385 }
1386 else {
1387 vm_ic_attr_index_initialize(ic, shape_id);
1388 }
1389
1390 val = default_value;
1391 }
1392 }
1393 }
1394
1395 if (!UNDEF_P(default_value)) {
1396 RUBY_ASSERT(!UNDEF_P(val));
1397 }
1398
1399 return val;
1400
1401general_path:
1402#endif /* OPT_IC_FOR_IVAR */
1403 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1404
1405 if (is_attr) {
1406 return rb_attr_get(obj, id);
1407 }
1408 else {
1409 return rb_ivar_get(obj, id);
1410 }
1411}
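/* Shape of the inline-cache protocol above, informally: each site
 * caches a (shape_id, attr_index) pair. On a hit the receiver's shape
 * matches the cached one, so the ivar is read straight from
 * ivar_list[index] (or the cached "not set" answer returns the
 * default). On a miss the shape tree is consulted and the cache
 * refilled; only "too complex" objects fall back to an st_table
 * lookup. */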
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
1427ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430
1431static VALUE
1432vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1433{
1434#if OPT_IC_FOR_IVAR
1435 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1436
1437 rb_check_frozen(obj);
1438
1439 attr_index_t index = rb_ivar_set_index(obj, id, val);
1440 shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);
1441
1442 if (!rb_shape_too_complex_p(next_shape_id)) {
1443 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1444 }
1445
1446 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1447 return val;
1448#else
1449 return rb_ivar_set(obj, id, val);
1450#endif
1451}
1452
1453static VALUE
1454vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1455{
1456 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1457}
1458
1459static VALUE
1460vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1461{
1462 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1463}
1464
1465NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1466static VALUE
1467vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1468{
1469 if (UNLIKELY(!rb_ractor_main_p())) {
1470 return Qundef;
1471 }
1472
1473 VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
1474 if (UNLIKELY(!fields_obj)) {
1475 return Qundef;
1476 }
1477
1478 shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
1479
1480 // Cache hit case
1481 if (shape_id == dest_shape_id) {
1482 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1483 }
1484 else if (dest_shape_id != INVALID_SHAPE_ID) {
1485 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1486 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1487 }
1488 else {
1489 return Qundef;
1490 }
1491 }
1492 else {
1493 return Qundef;
1494 }
1495
1496 RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
1497
1498 if (shape_id != dest_shape_id) {
1499 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1500 RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
1501 }
1502
1503 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1504
1505 return val;
1506}
1507
1508NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1509static VALUE
1510vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1511{
1512 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1513
1514 // Cache hit case
1515 if (shape_id == dest_shape_id) {
1516 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1517 }
1518 else if (dest_shape_id != INVALID_SHAPE_ID) {
1519 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1520 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1521 }
1522 else {
1523 return Qundef;
1524 }
1525 }
1526 else {
1527 return Qundef;
1528 }
1529
1530 VALUE fields_obj = rb_obj_fields(obj, id);
1531 RUBY_ASSERT(fields_obj);
1532 RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
1533
1534 if (shape_id != dest_shape_id) {
1535 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1536 RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
1537 }
1538
1539 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1540
1541 return val;
1542}
1543
1544static inline VALUE
1545vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1546{
1547#if OPT_IC_FOR_IVAR
1548 switch (BUILTIN_TYPE(obj)) {
1549 case T_OBJECT:
1550 {
1551 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1552
1553 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1554 RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));
1555
1556 if (LIKELY(shape_id == dest_shape_id)) {
1557 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1558 VM_ASSERT(!rb_ractor_shareable_p(obj));
1559 }
1560 else if (dest_shape_id != INVALID_SHAPE_ID) {
1561 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1562 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1563
1564 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1565
1566 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1567 }
1568 else {
1569 break;
1570 }
1571 }
1572 else {
1573 break;
1574 }
1575
1576 VALUE *ptr = ROBJECT_FIELDS(obj);
1577
1578 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1579 RB_OBJ_WRITE(obj, &ptr[index], val);
1580
1581 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1582 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1583 return val;
1584 }
1585 break;
1586 case T_CLASS:
1587 case T_MODULE:
1588 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1589 default:
1590 break;
1591 }
1592
1593 return Qundef;
1594#endif /* OPT_IC_FOR_IVAR */
1595}
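/* Setter-side counterpart of the getter cache: dest_shape_id is the
 * shape the object will have after the write. shape == dest means the
 * ivar already exists and this is a plain overwrite; shape being the
 * direct parent of dest with a matching edge name means the write adds
 * the ivar and commits the cached shape transition. Everything else
 * returns Qundef so the caller takes a slower path. */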
1596
1597static VALUE
1598update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1599{
1600 VALUE defined_class = 0;
1601 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1602
1603 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1604 defined_class = RBASIC(defined_class)->klass;
1605 }
1606
1607 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1608 if (!rb_cvc_tbl) {
1609 rb_bug("the cvc table should be set");
1610 }
1611
1612 VALUE ent_data;
1613 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1614 rb_bug("should have cvar cache entry");
1615 }
1616
1617 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1618
1619 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1620 ent->cref = cref;
1621 ic->entry = ent;
1622
1623 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1624 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1625 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1626 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1627
1628 return cvar_value;
1629}
1630
1631static inline VALUE
1632vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1633{
1634 const rb_cref_t *cref;
1635 cref = vm_get_cref(GET_EP());
1636
1637 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1638 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1639
1640 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1641 RUBY_ASSERT(!UNDEF_P(v));
1642
1643 return v;
1644 }
1645
1646 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1647
1648 return update_classvariable_cache(iseq, klass, id, cref, ic);
1649}
1650
1651VALUE
1652rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1653{
1654 return vm_getclassvariable(iseq, cfp, id, ic);
1655}
1656
1657static inline void
1658vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1659{
1660 const rb_cref_t *cref;
1661 cref = vm_get_cref(GET_EP());
1662
1663 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1664 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1665
1666 rb_class_ivar_set(ic->entry->class_value, id, val);
1667 return;
1668 }
1669
1670 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1671
1672 rb_cvar_set(klass, id, val);
1673
1674 update_classvariable_cache(iseq, klass, id, cref, ic);
1675}
1676
1677void
1678rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1679{
1680 vm_setclassvariable(iseq, cfp, id, val, ic);
1681}
1682
1683static inline VALUE
1684vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1685{
1686 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1687}
1688
1689static inline void
1690vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1691{
1692 if (RB_SPECIAL_CONST_P(obj)) {
1693        rb_error_frozen_object(obj);
1694        return;
1695 }
1696
1697 shape_id_t dest_shape_id;
1698 attr_index_t index;
1699 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1700
1701 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1702 switch (BUILTIN_TYPE(obj)) {
1703 case T_OBJECT:
1704 break;
1705 case T_CLASS:
1706 case T_MODULE:
1707 if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
1708 return;
1709 }
1710 break;
1711 default:
1712 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1713 return;
1714 }
1715 }
1716 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1717 }
1718}
1719
1720void
1721rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1722{
1723 vm_setinstancevariable(iseq, obj, id, val, ic);
1724}
1725
1726static VALUE
1727vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1728{
1729 /* continue throw */
1730
1731 if (FIXNUM_P(err)) {
1732 ec->tag->state = RUBY_TAG_FATAL;
1733 }
1734 else if (SYMBOL_P(err)) {
1735 ec->tag->state = TAG_THROW;
1736 }
1737 else if (THROW_DATA_P(err)) {
1738 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1739 }
1740 else {
1741 ec->tag->state = TAG_RAISE;
1742 }
1743 return err;
1744}
1745
1746static VALUE
1747vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1748 const int flag, const VALUE throwobj)
1749{
1750 const rb_control_frame_t *escape_cfp = NULL;
1751 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1752
1753 if (flag != 0) {
1754 /* do nothing */
1755 }
1756 else if (state == TAG_BREAK) {
1757 int is_orphan = 1;
1758 const VALUE *ep = GET_EP();
1759 const rb_iseq_t *base_iseq = GET_ISEQ();
1760 escape_cfp = reg_cfp;
1761
1762 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1763 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1764 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1765 ep = escape_cfp->ep;
1766 base_iseq = escape_cfp->iseq;
1767 }
1768 else {
1769 ep = VM_ENV_PREV_EP(ep);
1770 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1771 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1772 VM_ASSERT(escape_cfp->iseq == base_iseq);
1773 }
1774 }
1775
1776 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1777 /* lambda{... break ...} */
1778 is_orphan = 0;
1779 state = TAG_RETURN;
1780 }
1781 else {
1782 ep = VM_ENV_PREV_EP(ep);
1783
1784 while (escape_cfp < eocfp) {
1785 if (escape_cfp->ep == ep) {
1786 const rb_iseq_t *const iseq = escape_cfp->iseq;
1787 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1788 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1789 unsigned int i;
1790
1791 if (!ct) break;
1792 for (i=0; i < ct->size; i++) {
1793 const struct iseq_catch_table_entry *const entry =
1794 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1795
1796 if (entry->type == CATCH_TYPE_BREAK &&
1797 entry->iseq == base_iseq &&
1798 entry->start < epc && entry->end >= epc) {
1799 if (entry->cont == epc) { /* found! */
1800 is_orphan = 0;
1801 }
1802 break;
1803 }
1804 }
1805 break;
1806 }
1807
1808 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1809 }
1810 }
1811
1812 if (is_orphan) {
1813 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1814 }
1815 }
1816 else if (state == TAG_RETRY) {
1817 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1818
1819 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1820 }
1821 else if (state == TAG_RETURN) {
1822 const VALUE *current_ep = GET_EP();
1823 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1824 int in_class_frame = 0;
1825 int toplevel = 1;
1826 escape_cfp = reg_cfp;
1827
1828 // find target_lep, target_ep
1829 while (!VM_ENV_LOCAL_P(ep)) {
1830 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1831 target_ep = ep;
1832 }
1833 ep = VM_ENV_PREV_EP(ep);
1834 }
1835 target_lep = ep;
1836
1837 while (escape_cfp < eocfp) {
1838 const VALUE *lep = VM_CF_LEP(escape_cfp);
1839
1840 if (!target_lep) {
1841 target_lep = lep;
1842 }
1843
1844 if (lep == target_lep &&
1845 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1846 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1847 in_class_frame = 1;
1848 target_lep = 0;
1849 }
1850
1851 if (lep == target_lep) {
1852 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1853 toplevel = 0;
1854 if (in_class_frame) {
1855 /* lambda {class A; ... return ...; end} */
1856 goto valid_return;
1857 }
1858 else {
1859 const VALUE *tep = current_ep;
1860
1861 while (target_lep != tep) {
1862 if (escape_cfp->ep == tep) {
1863 /* in lambda */
1864 if (tep == target_ep) {
1865 goto valid_return;
1866 }
1867 else {
1868 goto unexpected_return;
1869 }
1870 }
1871 tep = VM_ENV_PREV_EP(tep);
1872 }
1873 }
1874 }
1875 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1876 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1877 case ISEQ_TYPE_TOP:
1878 case ISEQ_TYPE_MAIN:
1879 if (toplevel) {
1880 if (in_class_frame) goto unexpected_return;
1881 if (target_ep == NULL) {
1882 goto valid_return;
1883 }
1884 else {
1885 goto unexpected_return;
1886 }
1887 }
1888 break;
1889 case ISEQ_TYPE_EVAL: {
1890 const rb_iseq_t *is = escape_cfp->iseq;
1891 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1892 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1893 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1894 t = ISEQ_BODY(is)->type;
1895 }
1896 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1897 break;
1898 }
1899 case ISEQ_TYPE_CLASS:
1900 toplevel = 0;
1901 break;
1902 default:
1903 break;
1904 }
1905 }
1906 }
1907
1908 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1909 if (target_ep == NULL) {
1910 goto valid_return;
1911 }
1912 else {
1913 goto unexpected_return;
1914 }
1915 }
1916
1917 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1918 }
1919 unexpected_return:;
1920 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1921
1922 valid_return:;
1923 /* do nothing */
1924 }
1925 else {
1926        rb_bug("insns(throw): unsupported throw type");
1927 }
1928
1929 ec->tag->state = state;
1930 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1931}
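/* Ruby-level behavior this implements, roughly:
 *
 *   def m
 *     proc { break 1 }        # break targets m's (already finished) frame
 *   end
 *   m.call                    # => LocalJumpError "break from proc-closure"
 *
 *   lambda { break 1 }.call   # => 1; break inside a lambda is rewritten
 *                             #    into TAG_RETURN above
 */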
1932
1933static VALUE
1934vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1935 rb_num_t throw_state, VALUE throwobj)
1936{
1937 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1938 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1939
1940 if (state != 0) {
1941 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1942 }
1943 else {
1944 return vm_throw_continue(ec, throwobj);
1945 }
1946}
1947
1948VALUE
1949rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1950{
1951 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1952}
1953
1954static inline void
1955vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1956{
1957 int is_splat = flag & 0x01;
1958 const VALUE *ptr;
1959 rb_num_t len;
1960 const VALUE obj = ary;
1961
1962 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1963 ary = obj;
1964 ptr = &ary;
1965 len = 1;
1966 }
1967 else {
1968 ptr = RARRAY_CONST_PTR(ary);
1969 len = (rb_num_t)RARRAY_LEN(ary);
1970 }
1971
1972 if (num + is_splat == 0) {
1973        /* nothing is pushed */
1974 }
1975 else if (flag & 0x02) {
1976        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1977 rb_num_t i = 0, j;
1978
1979 if (len < num) {
1980 for (i = 0; i < num - len; i++) {
1981 *cfp->sp++ = Qnil;
1982 }
1983 }
1984
1985 for (j = 0; i < num; i++, j++) {
1986 VALUE v = ptr[len - j - 1];
1987 *cfp->sp++ = v;
1988 }
1989
1990 if (is_splat) {
1991 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1992 }
1993 }
1994 else {
1995 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1996 if (is_splat) {
1997 if (num > len) {
1998 *cfp->sp++ = rb_ary_new();
1999 }
2000 else {
2001 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
2002 }
2003 }
2004
2005 if (num > len) {
2006 rb_num_t i = 0;
2007 for (; i < num - len; i++) {
2008 *cfp->sp++ = Qnil;
2009 }
2010
2011 for (rb_num_t j = 0; i < num; i++, j++) {
2012 *cfp->sp++ = ptr[len - j - 1];
2013 }
2014 }
2015 else {
2016 for (rb_num_t j = 0; j < num; j++) {
2017 *cfp->sp++ = ptr[num - j - 1];
2018 }
2019 }
2020 }
2021
2022 RB_GC_GUARD(ary);
2023}
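/* Used for massign expansion. A sketch with num = 2, following the
 * stack effects of the code above (rightmost value pushed last):
 *
 *   a, b     = [1, 2, 3]   # flag 0:    pushes 2, then 1
 *   a, b, *c = [1, 2, 3]   # flag 0x01: pushes [3], then 2, then 1
 *   *a, b, c = [1, 2, 3]   # flag 0x03: pushes 3, then 2, then [1]
 *
 * A non-Array RHS that does not respond to #to_ary is treated as a
 * one-element array. */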
2024
2025static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2026
2027static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2028
2029static struct rb_class_cc_entries *
2030vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2031{
2032 int initial_capa = 2;
2033 struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
2034#if VM_CHECK_MODE > 0
2035 ccs->debug_sig = ~(VALUE)ccs;
2036#endif
2037 ccs->capa = initial_capa;
2038 ccs->len = 0;
2039 ccs->cme = cme;
2040 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2041
2042 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2043 RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
2044 return ccs;
2045}
2046
2047static void
2048vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2049{
2050 if (! vm_cc_markable(cc)) {
2051 return;
2052 }
2053
2054 if (UNLIKELY(ccs->len == ccs->capa)) {
2055 RUBY_ASSERT(ccs->capa > 0);
2056 ccs->capa *= 2;
2057 ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
2058#if VM_CHECK_MODE > 0
2059 ccs->debug_sig = ~(VALUE)ccs;
2060#endif
2061 // GC?
2062 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2063 }
2064 VM_ASSERT(ccs->len < ccs->capa);
2065
2066 const int pos = ccs->len++;
2067 ccs->entries[pos].argc = vm_ci_argc(ci);
2068 ccs->entries[pos].flag = vm_ci_flag(ci);
2069 RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);
2070
2071 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2072 // for tuning
2073 // vm_mtbl_dump(klass, 0);
2074 }
2075}
2076
2077#if VM_CHECK_MODE > 0
2078void
2079rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2080{
2081 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2082 for (int i=0; i<ccs->len; i++) {
2083 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2084 ccs->entries[i].flag,
2085 ccs->entries[i].argc);
2086 rp(ccs->entries[i].cc);
2087 }
2088}
2089
2090static int
2091vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2092{
2093 VM_ASSERT(vm_ccs_p(ccs));
2094 VM_ASSERT(ccs->len <= ccs->capa);
2095
2096 for (int i=0; i<ccs->len; i++) {
2097 const struct rb_callcache *cc = ccs->entries[i].cc;
2098
2099 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2100 VM_ASSERT(vm_cc_class_check(cc, klass));
2101 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2102 VM_ASSERT(!vm_cc_super_p(cc));
2103 VM_ASSERT(!vm_cc_refinement_p(cc));
2104 }
2105 return TRUE;
2106}
2107#endif
2108
2109const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2110
2111static void
2112vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2113{
2114 ASSERT_vm_locking();
2115
2116 if (rb_multi_ractor_p()) {
2117 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2118 // Another ractor updated the CC table while we were waiting on the VM lock.
2119 // We have to retry.
2120 return;
2121 }
2122
2123 VALUE ccs_obj = 0;
2124 rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
2125 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;
2126
2127 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2128 // Another ractor replaced that entry while we were waiting on the VM lock.
2129 return;
2130 }
2131
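 // Copy-on-write update: dup the table, delete the stale entry, then
 // atomically publish the new table, so lock-free readers in other
 // ractors never observe a partially modified table.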
2132 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2133 rb_vm_cc_table_delete(new_table, mid);
2134 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2135 }
2136 else {
2137 rb_vm_cc_table_delete(cc_tbl, mid);
2138 }
2139}
2140
2141static const struct rb_callcache *
2142vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2143{
2144 ASSERT_vm_locking();
2145
2146 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2147 const VALUE original_cc_table = cc_tbl;
2148 struct rb_class_cc_entries *ccs = NULL;
2149
2150 if (!cc_tbl) {
2151 cc_tbl = rb_vm_cc_table_create(1);
2152 }
2153 else if (rb_multi_ractor_p()) {
2154 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2155 }
2156
2157 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2158
2159 const rb_callable_method_entry_t *cme;
2160
2161 if (ccs) {
2162 cme = ccs->cme;
2163 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2164
2165 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2166 }
2167 else {
2168 cme = rb_callable_method_entry(klass, mid);
2169 }
2170
2171 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2172
2173 if (cme == NULL) {
2174 // undef or not found: can't cache the information
2175 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2176 return &vm_empty_cc;
2177 }
2178
2179 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2180
2181 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2182
2183 if (ccs == NULL) {
2184 VM_ASSERT(cc_tbl);
2185
2186 VALUE ccs_obj;
2187 if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
2188 ccs = (struct rb_class_cc_entries *)ccs_obj;
2189 }
2190 else {
2191 // TODO: required?
2192 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2193 }
2194 }
2195
2196 cme = rb_check_overloaded_cme(cme, ci);
2197
2198 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2199 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2200
2201 VM_ASSERT(vm_cc_cme(cc) != NULL);
2202 VM_ASSERT(cme->called_id == mid);
2203 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2204
2205 if (original_cc_table != cc_tbl) {
2206 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2207 }
2208
2209 return cc;
2210}
2211
2212static const struct rb_callcache *
2213vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2214{
2215 VALUE cc_tbl;
2216 struct rb_class_cc_entries *ccs;
2217retry:
2218 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2219 ccs = NULL;
2220
2221 if (cc_tbl) {
2222 // CCS data is keyed on method id, so we don't need to compare
2223 // method ids in the `for` loop below.
2224
2225 VALUE ccs_obj;
2226 if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
2227 ccs = (struct rb_class_cc_entries *)ccs_obj;
2228 const int ccs_len = ccs->len;
2229
2230 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2231 RB_VM_LOCKING() {
2232 vm_evict_cc(klass, cc_tbl, mid);
2233 }
2234 goto retry;
2235 }
2236 else {
2237 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2238
2239 // We already know the method id is correct because we had
2240 // to look up the ccs_data by method id. All we need to
2241 // compare is argc and flag.
2242 unsigned int argc = vm_ci_argc(ci);
2243 unsigned int flag = vm_ci_flag(ci);
2244
2245 for (int i=0; i<ccs_len; i++) {
2246 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2247 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2248 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2249
2250 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2251
2252 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2253 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2254
2255 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2256 VM_ASSERT(ccs_cc->klass == klass);
2257 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2258
2259 return ccs_cc;
2260 }
2261 }
2262 }
2263 }
2264 }
2265
2266 RB_GC_GUARD(cc_tbl);
2267 return NULL;
2268}
2269
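/* The lookup/populate split below implements double-checked locking:
 * vm_search_cc first probes lock-free via vm_lookup_cc, and only takes
 * the VM lock (re-probing under it) when the cache must be populated. */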
2270static const struct rb_callcache *
2271vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2272{
2273 const ID mid = vm_ci_mid(ci);
2274
2275 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2276 if (cc) {
2277 return cc;
2278 }
2279
2280 RB_VM_LOCKING() {
2281 if (rb_multi_ractor_p()) {
2282 // The CC may have been populated by another ractor while we were waiting on the lock,
2283 // so we must look up a second time.
2284 cc = vm_lookup_cc(klass, ci, mid);
2285 }
2286
2287 if (!cc) {
2288 cc = vm_populate_cc(klass, ci, mid);
2289 }
2290 }
2291
2292 return cc;
2293}
2294
2295const struct rb_callcache *
2296rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2297{
2298 const struct rb_callcache *cc;
2299
2300 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2301
2302 cc = vm_search_cc(klass, ci);
2303
2304 VM_ASSERT(cc);
2305 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2306 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2307 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2308 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2309 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2310
2311 return cc;
2312}
2313
2314static const struct rb_callcache *
2315vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2316{
2317#if USE_DEBUG_COUNTER
2318 const struct rb_callcache *old_cc = cd->cc;
2319#endif
2320
2321 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2322
2323#if OPT_INLINE_METHOD_CACHE
2324 cd->cc = cc;
2325
2326 const struct rb_callcache *empty_cc = &vm_empty_cc;
2327 if (cd_owner && cc != empty_cc) {
2328 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2329 }
2330
2331#if USE_DEBUG_COUNTER
2332 if (!old_cc || old_cc == empty_cc) {
2333 // empty
2334 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2335 }
2336 else if (old_cc == cc) {
2337 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2338 }
2339 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2340 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2341 }
2342 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2343 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2344 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2345 }
2346 else {
2347 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2348 }
2349#endif
2350#endif // OPT_INLINE_METHOD_CACHE
2351
2352 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2353 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2354
2355 return cc;
2356}
2357
2358ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2359static const struct rb_callcache *
2360vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2361{
2362 const struct rb_callcache *cc = cd->cc;
2363
2364#if OPT_INLINE_METHOD_CACHE
2365 if (LIKELY(vm_cc_class_check(cc, klass))) {
2366 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2367 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2368 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2369 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2370 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2371 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2372
2373 return cc;
2374 }
2375 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2376 }
2377 else {
2378 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2379 }
2380#endif
2381
2382 return vm_search_method_slowpath0(cd_owner, cd, klass);
2383}
2384
2385static const struct rb_callable_method_entry_struct *
2386vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2387{
2388 VALUE klass = CLASS_OF(recv);
2389 VM_ASSERT(klass != Qfalse);
2390 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2391
2392 const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
2393 return vm_cc_cme(cc);
2394}
2395
2396const struct rb_callable_method_entry_struct *
2397rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2398{
2399 return vm_search_method(cd_owner, cd, recv);
2400}
2401
2402#if __has_attribute(transparent_union)
2403typedef union {
2404 VALUE (*anyargs)(ANYARGS);
2405 VALUE (*f00)(VALUE);
2406 VALUE (*f01)(VALUE, VALUE);
2407 VALUE (*f02)(VALUE, VALUE, VALUE);
2408 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2409 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2410 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2411 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2412 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2413 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2414 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2415 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2416 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2417 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2418 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2419 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2420 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2421 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2422} __attribute__((__transparent_union__)) cfunc_type;
2423# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2424#else
2425typedef VALUE (*cfunc_type)(ANYARGS);
2426# define make_cfunc_type(f) (cfunc_type)(f)
2427#endif
2428
2429static inline int
2430check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2431{
2432 if (! me) {
2433 return false;
2434 }
2435 else {
2436 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2437 VM_ASSERT(callable_method_entry_p(me));
2438 VM_ASSERT(me->def);
2439 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2440 return false;
2441 }
2442 else {
2443#if __has_attribute(transparent_union)
2444 return me->def->body.cfunc.func == func.anyargs;
2445#else
2446 return me->def->body.cfunc.func == func;
2447#endif
2448 }
2449 }
2450}
2451
2452static inline int
2453check_method_basic_definition(const rb_callable_method_entry_t *me)
2454{
2455 return me && METHOD_ENTRY_BASIC(me);
2456}
2457
2458static inline int
2459vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2460{
2461 VM_ASSERT(iseq != NULL);
2462 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
2463 return check_cfunc(cme, func);
2464}
2465
2466bool
2467rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
2468{
2469 return check_cfunc(me, func);
2470}
2471
2472int
2473rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2474{
2475 return vm_method_cfunc_is(iseq, cd, recv, func);
2476}
2477
2478#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2479#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
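/* From here on, check_cfunc() and vm_method_cfunc_is() wrap their function
 * argument with make_cfunc_type(), so call sites can pass an ordinary
 * function pointer such as rb_obj_equal regardless of whether the
 * transparent_union representation is available. */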
2480
2481#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2482
2483static inline bool
2484FIXNUM_2_P(VALUE a, VALUE b)
2485{
2486 /* FIXNUM_P(a) && FIXNUM_P(b)
2487 * == ((a & 1) && (b & 1))
2488 * == a & b & 1 */
2489 SIGNED_VALUE x = a;
2490 SIGNED_VALUE y = b;
2491 SIGNED_VALUE z = x & y & 1;
2492 return z == 1;
2493}
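/* Worked example (illustrative): Fixnum 1 is encoded as 0b011 and Fixnum 2
 * as 0b101, so 0b011 & 0b101 & 1 == 1. A heap object pointer has tag bit 0,
 * forcing the result to 0. */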
2494
2495static inline bool
2496FLONUM_2_P(VALUE a, VALUE b)
2497{
2498#if USE_FLONUM
2499 /* FLONUM_P(a) && FLONUM_P(b)
2500 * == ((a & 3) == 2) && ((b & 3) == 2)
2501 * == !(((a ^ 2) | (b ^ 2)) & 3)
2502 */
2503 SIGNED_VALUE x = a;
2504 SIGNED_VALUE y = b;
2505 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2506 return !z;
2507#else
2508 return false;
2509#endif
2510}
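/* Worked example (illustrative): a flonum carries tag 0b10 in its low two
 * bits, so x ^ 2 clears those bits exactly for flonums; OR-ing both results
 * and masking with 3 is 0 only when a and b are both flonums. */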
2511
2512static VALUE
2513opt_equality_specialized(VALUE recv, VALUE obj)
2514{
2515 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2516 goto compare_by_identity;
2517 }
2518 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2519 goto compare_by_identity;
2520 }
2521 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2522 goto compare_by_identity;
2523 }
2524 else if (SPECIAL_CONST_P(recv)) {
2525 // other special constants: no specialization applies; fall through to Qundef
2526 }
2527 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2528 double a = RFLOAT_VALUE(recv);
2529 double b = RFLOAT_VALUE(obj);
2530
2531#if MSC_VERSION_BEFORE(1300)
2532 if (isnan(a)) {
2533 return Qfalse;
2534 }
2535 else if (isnan(b)) {
2536 return Qfalse;
2537 }
2538 else
2539#endif
2540 return RBOOL(a == b);
2541 }
2542 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2543 if (recv == obj) {
2544 return Qtrue;
2545 }
2546 else if (RB_TYPE_P(obj, T_STRING)) {
2547 return rb_str_eql_internal(obj, recv);
2548 }
2549 }
2550 return Qundef;
2551
2552 compare_by_identity:
2553 return RBOOL(recv == obj);
2554}
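/* Qundef from this function means "no specialization applies"; callers such
 * as opt_equality then verify that #== is still rb_obj_equal before
 * comparing by identity, and otherwise fall back to a full method dispatch. */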
2555
2556static VALUE
2557opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2558{
2559 VM_ASSERT(cd_owner != NULL);
2560
2561 VALUE val = opt_equality_specialized(recv, obj);
2562 if (!UNDEF_P(val)) return val;
2563
2564 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2565 return Qundef;
2566 }
2567 else {
2568 return RBOOL(recv == obj);
2569 }
2570}
2571
2572#undef EQ_UNREDEFINED_P
2573
2574static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2575NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2576
2577static VALUE
2578opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2579{
2580 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2581
2582 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2583 return RBOOL(recv == obj);
2584 }
2585 else {
2586 return Qundef;
2587 }
2588}
2589
2590static VALUE
2591opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2592{
2593 VALUE val = opt_equality_specialized(recv, obj);
2594 if (!UNDEF_P(val)) {
2595 return val;
2596 }
2597 else {
2598 return opt_equality_by_mid_slowpath(recv, obj, mid);
2599 }
2600}
2601
2602VALUE
2603rb_equal_opt(VALUE obj1, VALUE obj2)
2604{
2605 return opt_equality_by_mid(obj1, obj2, idEq);
2606}
2607
2608VALUE
2609rb_eql_opt(VALUE obj1, VALUE obj2)
2610{
2611 return opt_equality_by_mid(obj1, obj2, idEqlP);
2612}
2613
2614extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2615extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2616
2617static VALUE
2618check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2619{
2620 switch (type) {
2621 case VM_CHECKMATCH_TYPE_WHEN:
2622 return pattern;
2623 case VM_CHECKMATCH_TYPE_RESCUE:
2624 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2625 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2626 }
2627 /* fall through */
2628 case VM_CHECKMATCH_TYPE_CASE: {
2629 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2630 }
2631 default:
2632 rb_bug("check_match: unreachable");
2633 }
2634}
2635
2636
2637#if MSC_VERSION_BEFORE(1300)
2638#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2639#else
2640#define CHECK_CMP_NAN(a, b) /* do nothing */
2641#endif
2642
2643static inline VALUE
2644double_cmp_lt(double a, double b)
2645{
2646 CHECK_CMP_NAN(a, b);
2647 return RBOOL(a < b);
2648}
2649
2650static inline VALUE
2651double_cmp_le(double a, double b)
2652{
2653 CHECK_CMP_NAN(a, b);
2654 return RBOOL(a <= b);
2655}
2656
2657static inline VALUE
2658double_cmp_gt(double a, double b)
2659{
2660 CHECK_CMP_NAN(a, b);
2661 return RBOOL(a > b);
2662}
2663
2664static inline VALUE
2665double_cmp_ge(double a, double b)
2666{
2667 CHECK_CMP_NAN(a, b);
2668 return RBOOL(a >= b);
2669}
2670
2671// This logic is duplicated in vm_dump.c.
2672static inline VALUE *
2673vm_base_ptr(const rb_control_frame_t *cfp)
2674{
2675 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2676
2677 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2678 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2679
2680 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2681 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2682 int params = ISEQ_BODY(cfp->iseq)->param.size;
2683
2684 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2685 bp += vm_ci_argc(ci);
2686 }
2687
2688 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2689 /* adjust `self' */
2690 bp += 1;
2691 }
2692#if VM_DEBUG_BP_CHECK
2693 if (bp != cfp->bp_check) {
2694 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2695 (long)(cfp->bp_check - GET_EC()->vm_stack),
2696 (long)(bp - GET_EC()->vm_stack));
2697 rb_bug("vm_base_ptr: unreachable");
2698 }
2699#endif
2700 return bp;
2701 }
2702 else {
2703 return NULL;
2704 }
2705}
2706
2707VALUE *
2708rb_vm_base_ptr(const rb_control_frame_t *cfp)
2709{
2710 return vm_base_ptr(cfp);
2711}
2712
2713/* method call processes with call_info */
2714
2715#include "vm_args.c"
2716
2717static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2718ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2719static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2720static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2721static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2722static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2723static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2724
2725static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2726
2727static VALUE
2728vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2729{
2730 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2731
2732 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2733}
2734
2735static VALUE
2736vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2737{
2738 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2739
2740 const struct rb_callcache *cc = calling->cc;
2741 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2742 int param = ISEQ_BODY(iseq)->param.size;
2743 int local = ISEQ_BODY(iseq)->local_table_size;
2744 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2745}
2746
2747bool
2748rb_simple_iseq_p(const rb_iseq_t *iseq)
2749{
2750 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2751 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2752 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2753 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2754 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2757 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2758}
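/* Illustrative Ruby examples: `def m(a, b); end` is simple (mandatory
 * positional parameters only), while `def m(a, b = 1)`, `def m(*a)`,
 * `def m(k:)`, `def m(&blk)` and `def m(...)` each set one of the flags
 * above and are not. */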
2759
2760bool
2761rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2762{
2763 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2764 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2765 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2766 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2767 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2768 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2769 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2770 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2771}
2772
2773bool
2774rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2775{
2776 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2777 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2778 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2779 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2780 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2781 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2782 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2783}
2784
2785#define ALLOW_HEAP_ARGV (-2)
2786#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2787
2788static inline bool
2789vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2790{
2791 vm_check_canary(GET_EC(), cfp->sp);
2792 bool ret = false;
2793
2794 if (!NIL_P(ary)) {
2795 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2796 long len = RARRAY_LEN(ary);
2797 int argc = calling->argc;
2798
2799 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2800 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2801 * a temporary array, instead of trying to keep arguments on the VM stack.
2802 */
2803 VALUE *argv = cfp->sp - argc;
2804 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2805 rb_ary_cat(argv_ary, argv, argc);
2806 rb_ary_cat(argv_ary, ptr, len);
2807 cfp->sp -= argc - 1;
2808 cfp->sp[-1] = argv_ary;
2809 calling->argc = 1;
2810 calling->heap_argv = argv_ary;
2811 RB_GC_GUARD(ary);
2812 }
2813 else {
2814 long i;
2815
2816 if (max_args >= 0 && len + argc > max_args) {
2817 /* If only a given max_args is allowed, copy up to max args.
2818 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2819 * where additional arguments are ignored.
2820 *
2821 * Also, copy up to one more argument than the maximum,
2822 * in case it is an empty keyword hash that will be removed.
2823 */
2824 calling->argc += len - (max_args - argc + 1);
2825 len = max_args - argc + 1;
2826 ret = true;
2827 }
2828 else {
2829 /* Unset heap_argv if it was set originally. This can happen when
2830 * forwarding modified arguments, where heap_argv was used
2831 * originally but is not supported by the forwarded
2832 * method in all cases.
2833 */
2834 calling->heap_argv = 0;
2835 }
2836 CHECK_VM_STACK_OVERFLOW(cfp, len);
2837
2838 for (i = 0; i < len; i++) {
2839 *cfp->sp++ = ptr[i];
2840 }
2841 calling->argc += i;
2842 }
2843 }
2844
2845 return ret;
2846}
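/* Worked example (illustrative): for f(*[1, 2, 3]) with no leading
 * arguments, the three elements are copied onto the VM stack and
 * calling->argc grows by 3. Only when the total would exceed
 * VM_ARGC_STACK_MAX (and max_args allows it) are the arguments collected
 * into a hidden heap array via calling->heap_argv instead. */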
2847
2848static inline void
2849vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2850{
2851 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2852 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2853 const VALUE h = rb_hash_new_with_size(kw_len);
2854 VALUE *sp = cfp->sp;
2855 int i;
2856
2857 for (i=0; i<kw_len; i++) {
2858 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2859 }
2860 (sp-kw_len)[0] = h;
2861
2862 cfp->sp -= kw_len - 1;
2863 calling->argc -= kw_len - 1;
2864 calling->kw_splat = 1;
2865}
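/* Worked example (illustrative): for f(k1: 1, k2: 2) the stack holds the
 * values 1 and 2; they are replaced by the single hash {k1: 1, k2: 2},
 * so calling->argc shrinks by kw_len - 1 (here, from 2 to 1). */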
2866
2867static inline VALUE
2868vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2869{
2870 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2871 if (keyword_hash != Qnil) {
2872 /* Convert a non-hash keyword splat to a new hash */
2873 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2874 }
2875 }
2876 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2877 /* Convert a hash keyword splat to a new hash unless
2878 * a mutable keyword splat was passed.
2879 * Skip allocating a new hash for an empty keyword splat, as an
2880 * empty keyword splat will be ignored by both callers.
2881 */
2882 keyword_hash = rb_hash_dup(keyword_hash);
2883 }
2884 return keyword_hash;
2885}
2886
2887static inline void
2888CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2889 struct rb_calling_info *restrict calling,
2890 const struct rb_callinfo *restrict ci, int max_args)
2891{
2892 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2893 if (IS_ARGS_KW_SPLAT(ci)) {
2894 // f(*a, **kw)
2895 VM_ASSERT(calling->kw_splat == 1);
2896
2897 cfp->sp -= 2;
2898 calling->argc -= 2;
2899 VALUE ary = cfp->sp[0];
2900 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2901
2902 // splat a
2903 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2904
2905 // put kw
2906 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2907 if (UNLIKELY(calling->heap_argv)) {
2908 rb_ary_push(calling->heap_argv, kwh);
2909 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2910 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2911 calling->kw_splat = 0;
2912 }
2913 }
2914 else {
2915 cfp->sp[0] = kwh;
2916 cfp->sp++;
2917 calling->argc++;
2918
2919 VM_ASSERT(calling->kw_splat == 1);
2920 }
2921 }
2922 else {
2923 calling->kw_splat = 0;
2924 }
2925 }
2926 else {
2927 // f(*a)
2928 VM_ASSERT(calling->kw_splat == 0);
2929
2930 cfp->sp -= 1;
2931 calling->argc -= 1;
2932 VALUE ary = cfp->sp[0];
2933
2934 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2935 goto check_keyword;
2936 }
2937
2938 // check the last argument
2939 VALUE last_hash, argv_ary;
2940 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2941 if (!IS_ARGS_KEYWORD(ci) &&
2942 RARRAY_LEN(argv_ary) > 0 &&
2943 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2944 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2945
2946 rb_ary_pop(argv_ary);
2947 if (!RHASH_EMPTY_P(last_hash)) {
2948 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2949 calling->kw_splat = 1;
2950 }
2951 }
2952 }
2953 else {
2954check_keyword:
2955 if (!IS_ARGS_KEYWORD(ci) &&
2956 calling->argc > 0 &&
2957 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2958 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2959
2960 if (RHASH_EMPTY_P(last_hash)) {
2961 calling->argc--;
2962 cfp->sp -= 1;
2963 }
2964 else {
2965 cfp->sp[-1] = rb_hash_dup(last_hash);
2966 calling->kw_splat = 1;
2967 }
2968 }
2969 }
2970 }
2971 }
2972 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2973 // f(**kw)
2974 VM_ASSERT(calling->kw_splat == 1);
2975 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2976
2977 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2978 cfp->sp--;
2979 calling->argc--;
2980 calling->kw_splat = 0;
2981 }
2982 else {
2983 cfp->sp[-1] = kwh;
2984 }
2985 }
2986 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2987 // f(k1:1, k2:2)
2988 VM_ASSERT(calling->kw_splat == 0);
2989
2990 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2991 * by creating a keyword hash.
2992 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2993 */
2994 vm_caller_setup_arg_kw(cfp, calling, ci);
2995 }
2996}
2997
2998#define USE_OPT_HIST 0
2999
3000#if USE_OPT_HIST
3001#define OPT_HIST_MAX 64
3002static int opt_hist[OPT_HIST_MAX+1];
3003
3004__attribute__((destructor))
3005static void
3006opt_hist_show_results_at_exit(void)
3007{
3008 for (int i=0; i<OPT_HIST_MAX; i++) {
3009 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
3010 }
3011}
3012#endif
3013
3014static VALUE
3015vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3016 struct rb_calling_info *calling)
3017{
3018 const struct rb_callcache *cc = calling->cc;
3019 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3020 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3021 const int opt = calling->argc - lead_num;
3022 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3023 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3024 const int param = ISEQ_BODY(iseq)->param.size;
3025 const int local = ISEQ_BODY(iseq)->local_table_size;
3026 const int delta = opt_num - opt;
3027
3028 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3029
3030#if USE_OPT_HIST
3031 if (opt_pc < OPT_HIST_MAX) {
3032 opt_hist[opt]++;
3033 }
3034 else {
3035 opt_hist[OPT_HIST_MAX]++;
3036 }
3037#endif
3038
3039 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
3040}
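/* Worked example (illustrative): for `def m(a, b = 1, c = 2)`, lead_num == 1
 * and opt_num == 2, so opt_table holds three entry PCs. Calling m(10, 20)
 * gives opt == 1: execution starts at the PC that assigns c's default, and
 * the param size passed to the frame setup shrinks by delta == 1, the number
 * of unfilled optional slots. */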
3041
3042static VALUE
3043vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3044 struct rb_calling_info *calling)
3045{
3046 const struct rb_callcache *cc = calling->cc;
3047 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3048 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3049 const int opt = calling->argc - lead_num;
3050 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3051
3052 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3053
3054#if USE_OPT_HIST
3055 if (opt_pc < OPT_HIST_MAX) {
3056 opt_hist[opt]++;
3057 }
3058 else {
3059 opt_hist[OPT_HIST_MAX]++;
3060 }
3061#endif
3062
3063 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3064}
3065
3066static void
3067args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3068 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3069 VALUE *const locals);
3070
3071static VALUE
3072vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3073 struct rb_calling_info *calling)
3074{
3075 const struct rb_callcache *cc = calling->cc;
3076 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3077 int param_size = ISEQ_BODY(iseq)->param.size;
3078 int local_size = ISEQ_BODY(iseq)->local_table_size;
3079
3080 // Setting up local size and param size
3081 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3082
3083 local_size = local_size + vm_ci_argc(calling->cd->ci);
3084 param_size = param_size + vm_ci_argc(calling->cd->ci);
3085
3086 cfp->sp[0] = (VALUE)calling->cd->ci;
3087
3088 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
3089}
3090
3091static VALUE
3092vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3093 struct rb_calling_info *calling)
3094{
3095 const struct rb_callinfo *ci = calling->cd->ci;
3096 const struct rb_callcache *cc = calling->cc;
3097
3098 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
3099 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
3100
3101 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3102 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3103 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3104 const int ci_kw_len = kw_arg->keyword_len;
3105 const VALUE * const ci_keywords = kw_arg->keywords;
3106 VALUE *argv = cfp->sp - calling->argc;
3107 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3108 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3109 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3110 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3111 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3112
3113 int param = ISEQ_BODY(iseq)->param.size;
3114 int local = ISEQ_BODY(iseq)->local_table_size;
3115 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3116}
3117
3118static VALUE
3119vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3120 struct rb_calling_info *calling)
3121{
3122 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3123 const struct rb_callcache *cc = calling->cc;
3124
3125 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3126 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3127
3128 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3129 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3130 VALUE * const argv = cfp->sp - calling->argc;
3131 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3132
3133 int i;
3134 for (i=0; i<kw_param->num; i++) {
3135 klocals[i] = kw_param->default_values[i];
3136 }
3137 klocals[i] = INT2FIX(0); // kw specify flag
3138 // NOTE:
3139 // nobody checks this value, but it should be cleared because it can
3140 // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
3141
3142 int param = ISEQ_BODY(iseq)->param.size;
3143 int local = ISEQ_BODY(iseq)->local_table_size;
3144 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3145}
3146
3147static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3148
3149static VALUE
3150vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3151 struct rb_calling_info *calling)
3152{
3153 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3154 cfp->sp -= (calling->argc + 1);
3155 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3156 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3157}
3158
3159VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3160
3161static void
3162warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3163{
3164 rb_vm_t *vm = GET_VM();
3165 set_table *dup_check_table = vm->unused_block_warning_table;
3166 st_data_t key;
3167 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3168
3169 union {
3170 VALUE v;
3171 unsigned char b[SIZEOF_VALUE];
3172 } k1 = {
3173 .v = (VALUE)pc,
3174 }, k2 = {
3175 .v = (VALUE)cme->def,
3176 };
3177
3178 // relaxed check
3179 if (!strict_unused_block) {
3180 key = (st_data_t)cme->def->original_id;
3181
3182 if (set_table_lookup(dup_check_table, key)) {
3183 return;
3184 }
3185 }
3186
3187 // strict check
3188 // make a unique key from the pc and me->def pointers
3189 key = 0;
3190 for (int i=0; i<SIZEOF_VALUE; i++) {
3191 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3192 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3193 }
3194
3195 if (0) {
3196 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3197 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3198 fprintf(stderr, "key:%p\n", (void *)key);
3199 }
3200
3201 // duplication check
3202 if (set_insert(dup_check_table, key)) {
3203 // already shown
3204 }
3205 else if (RTEST(ruby_verbose) || strict_unused_block) {
3206 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3207 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3208
3209 if (!NIL_P(m_loc)) {
3210 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3211 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3212 }
3213 else {
3214 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3215 }
3216 }
3217}
3218
3219static inline int
3220vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3221 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3222{
3223 const struct rb_callinfo *ci = calling->cd->ci;
3224 const struct rb_callcache *cc = calling->cc;
3225
3226 VM_ASSERT((vm_ci_argc(ci), 1));
3227 VM_ASSERT(vm_cc_cme(cc) != NULL);
3228
3229 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3230 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3231 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3232 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3233 }
3234
3235 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3236 if (LIKELY(rb_simple_iseq_p(iseq))) {
3237 rb_control_frame_t *cfp = ec->cfp;
3238 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3239 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3240
3241 if (calling->argc != lead_num) {
3242 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3243 }
3244
3245 //VM_ASSERT(ci == calling->cd->ci);
3246 VM_ASSERT(cc == calling->cc);
3247
3248 if (vm_call_iseq_optimizable_p(ci, cc)) {
3249 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3250 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3251 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3252 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3253 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3254 }
3255 else {
3256 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3257 }
3258 }
3259 return 0;
3260 }
3261 else if (rb_iseq_only_optparam_p(iseq)) {
3262 rb_control_frame_t *cfp = ec->cfp;
3263
3264 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3265 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3266
3267 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3268 const int argc = calling->argc;
3269 const int opt = argc - lead_num;
3270
3271 if (opt < 0 || opt > opt_num) {
3272 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3273 }
3274
3275 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3276 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3277 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3278 vm_call_cacheable(ci, cc));
3279 }
3280 else {
3281 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3282 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3283 vm_call_cacheable(ci, cc));
3284 }
3285
3286 /* initialize opt vars for self-references */
3287 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3288 for (int i=argc; i<lead_num + opt_num; i++) {
3289 argv[i] = Qnil;
3290 }
3291 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3292 }
3293 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3294 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3295 const int argc = calling->argc;
3296 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3297
3298 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3299 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3300
3301 if (argc - kw_arg->keyword_len == lead_num) {
3302 const int ci_kw_len = kw_arg->keyword_len;
3303 const VALUE * const ci_keywords = kw_arg->keywords;
3304 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3305 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3306
3307 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3308 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3309
3310 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3311 vm_call_cacheable(ci, cc));
3312
3313 return 0;
3314 }
3315 }
3316 else if (argc == lead_num) {
3317 /* no kwarg */
3318 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3319 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3320
3321 if (klocals[kw_param->num] == INT2FIX(0)) {
3322 /* copy from default_values */
3323 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3324 vm_call_cacheable(ci, cc));
3325 }
3326
3327 return 0;
3328 }
3329 }
3330 }
3331
3332 // Called iseq is using ... param
3333 // def foo(...) # <- iseq for foo will have "forwardable"
3334 //
3335 // We want to set the `...` local to the caller's CI
3336 // foo(1, 2) # <- the ci for this should end up as `...`
3337 //
3338 // The stack should then look like:
3339 //
3340 // => 1
3341 // => 2
3342 // => *
3343 // => **
3344 // => &
3345 // => ... # <- points at `foo`s CI
3346 // => cref_or_me
3347 // => specval
3348 // => type
3349 //
3350 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3351 bool can_fastpath = true;
3352
3353 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3354 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3355 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3356 ci = vm_ci_new_runtime(
3357 vm_ci_mid(ci),
3358 vm_ci_flag(ci),
3359 vm_ci_argc(ci),
3360 vm_ci_kwarg(ci));
3361 }
3362 else {
3363 ci = forward_cd->caller_ci;
3364 }
3365 can_fastpath = false;
3366 }
3367 // C functions calling iseqs will stack-allocate a CI,
3368 // so we need to convert it to a heap-allocated one
3369 if (!vm_ci_markable(ci)) {
3370 ci = vm_ci_new_runtime(
3371 vm_ci_mid(ci),
3372 vm_ci_flag(ci),
3373 vm_ci_argc(ci),
3374 vm_ci_kwarg(ci));
3375 can_fastpath = false;
3376 }
3377 argv[param_size - 1] = (VALUE)ci;
3378 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3379 return 0;
3380 }
3381
3382 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3383}
3384
3385static void
3386vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3387{
3388 // This case is when the caller is using a ... parameter.
3389 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING.
3390 // In this case the caller's caller's CI will be on the stack.
3391 //
3392 // For example:
3393 //
3394 // def bar(a, b); a + b; end
3395 // def foo(...); bar(...); end
3396 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3397 //
3398 // Stack layout will be:
3399 //
3400 // > 1
3401 // > 2
3402 // > CI for foo(1, 2)
3403 // > cref_or_me
3404 // > specval
3405 // > type
3406 // > receiver
3407 // > CI for foo(1, 2), via `getlocal ...`
3408 // > ( SP points here )
3409 const VALUE * lep = VM_CF_LEP(cfp);
3410
3411 const rb_iseq_t *iseq;
3412
3413 // If we're in an escaped environment (lambda for example), get the iseq
3414 // from the captured env.
3415 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3416 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3417 iseq = env->iseq;
3418 }
3419 else { // Otherwise use the lep to find the caller
3420 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3421 }
3422
3423 // Our local storage is below the args we need to copy
3424 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3425
3426 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // VM_ENV_DATA_SIZE - 1 (== 2) slots of EP metadata
3427 VALUE * to = cfp->sp - 1; // clobber the CI
3428
3429 if (RTEST(splat)) {
3430 to -= 1; // clobber the splat array
3431 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3432 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3433 to += RARRAY_LEN(splat);
3434 }
3435
3436 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3437 MEMCPY(to, from, VALUE, argc);
3438 cfp->sp = to + argc;
3439
3440 // Stack layout should now be:
3441 //
3442 // > 1
3443 // > 2
3444 // > CI for foo(1, 2)
3445 // > cref_or_me
3446 // > specval
3447 // > type
3448 // > receiver
3449 // > 1
3450 // > 2
3451 // > ( SP points here )
3452}
3453
3454static VALUE
3455vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3456{
3457 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3458
3459 const struct rb_callcache *cc = calling->cc;
3460 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3461 int param_size = ISEQ_BODY(iseq)->param.size;
3462 int local_size = ISEQ_BODY(iseq)->local_table_size;
3463
3464 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3465
3466 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3467 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3468}
3469
3470static VALUE
3471vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3472{
3473 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3474
3475 const struct rb_callcache *cc = calling->cc;
3476 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3477 int param_size = ISEQ_BODY(iseq)->param.size;
3478 int local_size = ISEQ_BODY(iseq)->local_table_size;
3479
3480 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3481
3482 // Setting up local size and param size
3483 local_size = local_size + vm_ci_argc(calling->cd->ci);
3484 param_size = param_size + vm_ci_argc(calling->cd->ci);
3485
3486 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3487 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3488}
3489
3490static inline VALUE
3491vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3492 int opt_pc, int param_size, int local_size)
3493{
3494 const struct rb_callinfo *ci = calling->cd->ci;
3495 const struct rb_callcache *cc = calling->cc;
3496
3497 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3498 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3499 }
3500 else {
3501 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3502 }
3503}
3504
3505static inline VALUE
3506vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3507 int opt_pc, int param_size, int local_size)
3508{
3509 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3510 VALUE *argv = cfp->sp - calling->argc;
3511 VALUE *sp = argv + param_size;
3512 cfp->sp = argv - 1 /* recv */;
3513
3514 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3515 calling->block_handler, (VALUE)me,
3516 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3517 local_size - param_size,
3518 ISEQ_BODY(iseq)->stack_max);
3519 return Qundef;
3520}
3521
3522static inline VALUE
3523vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3524{
3525 const struct rb_callcache *cc = calling->cc;
3526 unsigned int i;
3527 VALUE *argv = cfp->sp - calling->argc;
3528 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3529 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3530 VALUE *src_argv = argv;
3531 VALUE *sp_orig, *sp;
3532 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3533
3534 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3535 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3536 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3537 dst_captured->code.val = src_captured->code.val;
3538 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3539 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3540 }
3541 else {
3542 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3543 }
3544 }
3545
3546 vm_pop_frame(ec, cfp, cfp->ep);
3547 cfp = ec->cfp;
3548
3549 sp_orig = sp = cfp->sp;
3550
3551 /* push self */
3552 sp[0] = calling->recv;
3553 sp++;
3554
3555 /* copy arguments */
3556 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3557 *sp++ = src_argv[i];
3558 }
3559
3560 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3561 calling->recv, calling->block_handler, (VALUE)me,
3562 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3563 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3564 ISEQ_BODY(iseq)->stack_max);
3565
3566 cfp->sp = sp_orig;
3567
3568 return Qundef;
3569}
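/* Note: unlike the normal path, the tailcall path pops the caller's frame
 * (vm_pop_frame) before pushing the callee's at the same stack position, so
 * self-recursive tailcalls run in constant VM stack space. A block handler
 * captured from the popped frame is first re-pointed at the previous frame
 * so it stays valid. */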
3570
3571static void
3572ractor_unsafe_check(void)
3573{
3574 if (!rb_ractor_main_p()) {
3575 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3576 }
3577}
3578
3579static VALUE
3580call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3581{
3582 ractor_unsafe_check();
3583 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3584 return (*f)(recv, rb_ary_new4(argc, argv));
3585}
3586
3587static VALUE
3588call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3589{
3590 ractor_unsafe_check();
3591 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3592 return (*f)(argc, argv, recv);
3593}
3594
3595static VALUE
3596call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3597{
3598 ractor_unsafe_check();
3599 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3600 return (*f)(recv);
3601}
3602
3603static VALUE
3604call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3605{
3606 ractor_unsafe_check();
3607 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3608 return (*f)(recv, argv[0]);
3609}
3610
3611static VALUE
3612call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3613{
3614 ractor_unsafe_check();
3615 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3616 return (*f)(recv, argv[0], argv[1]);
3617}
3618
3619static VALUE
3620call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3621{
3622 ractor_unsafe_check();
3623 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3624 return (*f)(recv, argv[0], argv[1], argv[2]);
3625}
3626
3627static VALUE
3628call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3629{
3630 ractor_unsafe_check();
3631 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3632 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3633}
3634
3635static VALUE
3636call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3637{
3638 ractor_unsafe_check();
3639 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3640 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3641}
3642
3643static VALUE
3644call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3645{
3646 ractor_unsafe_check();
3647 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3648 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3649}
3650
3651static VALUE
3652call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3653{
3654 ractor_unsafe_check();
3655 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3656 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3657}
3658
3659static VALUE
3660call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3661{
3662 ractor_unsafe_check();
3663 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3664 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3665}
3666
3667static VALUE
3668call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3669{
3670 ractor_unsafe_check();
3671 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3672 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3673}
3674
3675static VALUE
3676call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3677{
3678 ractor_unsafe_check();
3679 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3680 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3681}
3682
3683static VALUE
3684call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3685{
3686 ractor_unsafe_check();
3687 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3688 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3689}
3690
3691static VALUE
3692call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3693{
3694 ractor_unsafe_check();
3695 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3696 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3697}
3698
3699static VALUE
3700call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3701{
3702 ractor_unsafe_check();
3703 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3704 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3705}
3706
3707static VALUE
3708call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3709{
3710 ractor_unsafe_check();
3711 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3712 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3713}
3714
3715static VALUE
3716call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3717{
3718 ractor_unsafe_check();
3719 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3720 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3721}
3722
3723static VALUE
3724ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3725{
3726 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3727 return (*f)(recv, rb_ary_new4(argc, argv));
3728}
3729
3730static VALUE
3731ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3732{
3733 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3734 return (*f)(argc, argv, recv);
3735}
3736
3737static VALUE
3738ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3739{
3740 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3741 return (*f)(recv);
3742}
3743
3744static VALUE
3745ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3746{
3747 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3748 return (*f)(recv, argv[0]);
3749}
3750
3751static VALUE
3752ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3753{
3754 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3755 return (*f)(recv, argv[0], argv[1]);
3756}
3757
3758static VALUE
3759ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3760{
3761 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3762 return (*f)(recv, argv[0], argv[1], argv[2]);
3763}
3764
3765static VALUE
3766ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3767{
3768 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3769 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3770}
3771
3772static VALUE
3773ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3774{
3775 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3776 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3777}
3778
3779static VALUE
3780ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3781{
3782 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3783 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3784}
3785
3786static VALUE
3787ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3788{
3789 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3790 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3791}
3792
3793static VALUE
3794ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3795{
3796 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3797 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3798}
3799
3800static VALUE
3801ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3802{
3803 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3804 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3805}
3806
3807static VALUE
3808ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3809{
3810 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3811 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3812}
3813
3814static VALUE
3815ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3816{
3817    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3818    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3819}
3820
3821static VALUE
3822ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3823{
3824    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3825    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3826}
3827
3828static VALUE
3829ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3830{
3831    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3832    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3833}
3834
3835static VALUE
3836ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3837{
3838    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3839    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3840}
3841
3842static VALUE
3843ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3844{
3845    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3846    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3847}
3848
3849static inline int
3850vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3851{
3852 const int ov_flags = RAISED_STACKOVERFLOW;
3853 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3854 if (rb_ec_raised_p(ec, ov_flags)) {
3855 rb_ec_raised_reset(ec, ov_flags);
3856 return TRUE;
3857 }
3858 return FALSE;
3859}
3860
3861#define CHECK_CFP_CONSISTENCY(func) \
3862 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3863 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3864
3865static inline
3866const rb_method_cfunc_t *
3867vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3868{
3869#if VM_DEBUG_VERIFY_METHOD_CACHE
3870 switch (me->def->type) {
3871 case VM_METHOD_TYPE_CFUNC:
3872 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3873 break;
3874# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3875 METHOD_BUG(ISEQ);
3876 METHOD_BUG(ATTRSET);
3877 METHOD_BUG(IVAR);
3878 METHOD_BUG(BMETHOD);
3879 METHOD_BUG(ZSUPER);
3880 METHOD_BUG(UNDEF);
3881 METHOD_BUG(OPTIMIZED);
3882 METHOD_BUG(MISSING);
3883 METHOD_BUG(REFINED);
3884 METHOD_BUG(ALIAS);
3885# undef METHOD_BUG
3886 default:
3887 rb_bug("wrong method type: %d", me->def->type);
3888 }
3889#endif
3890 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3891}
3892
3893static VALUE
3894vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3895 int argc, VALUE *argv, VALUE *stack_bottom)
3896{
3897 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3898 const struct rb_callinfo *ci = calling->cd->ci;
3899 const struct rb_callcache *cc = calling->cc;
3900 VALUE val;
3901 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3902 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3903
3904 VALUE recv = calling->recv;
3905 VALUE block_handler = calling->block_handler;
3906 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3907
3908 if (UNLIKELY(calling->kw_splat)) {
3909 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3910 }
3911
3912 VM_ASSERT(reg_cfp == ec->cfp);
3913
3914 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3915 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3916
3917 vm_push_frame(ec, NULL, frame_type, recv,
3918 block_handler, (VALUE)me,
3919 0, ec->cfp->sp, 0, 0);
3920
3921 int len = cfunc->argc;
3922 if (len >= 0) rb_check_arity(argc, len, len);
3923
3924 reg_cfp->sp = stack_bottom;
3925 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3926
3927 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3928
3929 rb_vm_pop_frame(ec);
3930
3931 VM_ASSERT(ec->cfp->sp == stack_bottom);
3932
3933 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3934 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3935
3936 return val;
3937}
3938
3939// Push a C method frame for a given cme. This is called when JIT code skipped
3940// pushing a frame but the C method reached a point where a frame is needed.
3941void
3942rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3943{
3944 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3945 rb_execution_context_t *ec = GET_EC();
3946 VALUE *sp = ec->cfp->sp;
3947 VALUE recv = *(sp - recv_idx - 1);
3948 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3949 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3950#if VM_CHECK_MODE > 0
3951 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3952 *(GET_EC()->cfp->sp) = Qfalse;
3953#endif
3954 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3955}
3956
3957// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3958bool
3959rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3960{
3961 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3962}
3963
3964static VALUE
3965vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3966{
3967 int argc = calling->argc;
3968 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3969 VALUE *argv = &stack_bottom[1];
3970
3971 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3972}
3973
3974static VALUE
3975vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3976{
3977 const struct rb_callinfo *ci = calling->cd->ci;
3978 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3979
3980 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3981 VALUE argv_ary;
3982 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3983 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3984 int argc = RARRAY_LENINT(argv_ary);
3985 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3986 VALUE *stack_bottom = reg_cfp->sp - 2;
3987
3988 VM_ASSERT(calling->argc == 1);
3989 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3990 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3991
3992 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3993 }
3994 else {
3995 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3996
3997 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3998 }
3999}
4000
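/* Note (added): unpack a splatted array back onto the VM stack so the cfunc
 * can take argc/argv straight from the stack; arrays longer than
 * VM_ARGC_STACK_MAX fall back to the generic vm_call_cfunc_other path. */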
4001static inline VALUE
4002vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
4003{
4004 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
4005 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
4006
4007 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
4008 return vm_call_cfunc_other(ec, reg_cfp, calling);
4009 }
4010
4011 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4012 calling->kw_splat = 0;
4013 int i;
4014 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
4015 VALUE *sp = stack_bottom;
4016 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
4017    for (i = 0; i < argc; i++) {
4018        *++sp = argv[i];
4019    }
4020    reg_cfp->sp = sp + 1;
4021
4022 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
4023}
4024
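/* Note (added): specialized path for a single positional splat to a cfunc,
 * e.g. `f(*a)`. If the splatted array ends with a hash flagged to be passed
 * as keywords (from `**` packing), a non-empty hash forces the generic path,
 * while an empty one is simply dropped via argc_offset. */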
4025static inline VALUE
4026vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4027{
4028 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
4029 VALUE argv_ary = reg_cfp->sp[-1];
4030 int argc = RARRAY_LENINT(argv_ary);
4031 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4032 VALUE last_hash;
4033 int argc_offset = 0;
4034
4035 if (UNLIKELY(argc > 0 &&
4036 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
4037 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
4038 if (!RHASH_EMPTY_P(last_hash)) {
4039 return vm_call_cfunc_other(ec, reg_cfp, calling);
4040 }
4041 argc_offset++;
4042 }
4043 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
4044}
4045
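/* Note (added): `f(*a, **kw)` where kw turns out to be nil or an empty hash
 * degenerates to `f(*a)`: drop the keyword slot (stack_offset 1) and reuse
 * the array-argv path above. */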
4046static inline VALUE
4047vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4048{
4049 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4050 VALUE keyword_hash = reg_cfp->sp[-1];
4051
4052 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4053 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4054 }
4055
4056 return vm_call_cfunc_other(ec, reg_cfp, calling);
4057}
4058
4059static VALUE
4060vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4061{
4062 const struct rb_callinfo *ci = calling->cd->ci;
4063 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4064
4065 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4066 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4067 // f(*a)
4068 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4069 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4070 }
4071 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4072 // f(*a, **kw)
4073 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4074 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4075 }
4076 }
4077
4078 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4079 return vm_call_cfunc_other(ec, reg_cfp, calling);
4080}
4081
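/* Note (added): attr_reader, e.g. `attr_reader :x`, defines a
 * VM_METHOD_TYPE_IVAR method, so `obj.x` reads @x directly through the
 * shape/index cached in cc instead of pushing a method frame. */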
4082static VALUE
4083vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4084{
4085 const struct rb_callcache *cc = calling->cc;
4086 RB_DEBUG_COUNTER_INC(ccf_ivar);
4087 cfp->sp -= 1;
4088 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4089 return ivar;
4090}
4091
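/* Note (added): attr_writer, e.g. `obj.x = v`, pops the value and receiver,
 * then attempts the shape-cached ivar write; a miss (Qundef) retries with
 * the class/default setter and finally the generic slowpath. */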
4092static VALUE
4093vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
4094{
4095 RB_DEBUG_COUNTER_INC(ccf_attrset);
4096 VALUE val = *(cfp->sp - 1);
4097 cfp->sp -= 2;
4098 attr_index_t index;
4099 shape_id_t dest_shape_id;
4100 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
4101 ID id = vm_cc_cme(cc)->def->body.attr.id;
4102 rb_check_frozen(obj);
4103 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4104 if (UNDEF_P(res)) {
4105 switch (BUILTIN_TYPE(obj)) {
4106 case T_OBJECT:
4107 break;
4108 case T_CLASS:
4109 case T_MODULE:
4110 {
4111 res = vm_setivar_class(obj, id, val, dest_shape_id, index);
4112 if (!UNDEF_P(res)) {
4113 return res;
4114 }
4115 }
4116 break;
4117 default:
4118 {
4119 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4120 if (!UNDEF_P(res)) {
4121 return res;
4122 }
4123 }
4124 }
4125 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4126 }
4127 return res;
4128}
4129
4130static VALUE
4131vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4132{
4133 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4134}
4135
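/* Note (added): a bmethod is a method whose body is a Proc, e.g.
 * `define_method(:m) { ... }`. A non-shareable Proc may only be invoked
 * from the Ractor that defined it; otherwise a RuntimeError is raised. */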
4136static inline VALUE
4137vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4138{
4139 rb_proc_t *proc;
4140 VALUE val;
4141 const struct rb_callcache *cc = calling->cc;
4142 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4143 VALUE procv = cme->def->body.bmethod.proc;
4144
4145 if (!RB_OBJ_SHAREABLE_P(procv) &&
4146 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4147 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4148 }
4149
4150 /* control block frame */
4151 GetProcPtr(procv, proc);
4152 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4153
4154 return val;
4155}
4156
4157static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4158
4159static VALUE
4160vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4161{
4162 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4163
4164 const struct rb_callcache *cc = calling->cc;
4165 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4166 VALUE procv = cme->def->body.bmethod.proc;
4167
4168 if (!RB_OBJ_SHAREABLE_P(procv) &&
4169 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4170 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4171 }
4172
4173 rb_proc_t *proc;
4174 GetProcPtr(procv, proc);
4175 const struct rb_block *block = &proc->block;
4176
4177 while (vm_block_type(block) == block_type_proc) {
4178 block = vm_proc_block(block->as.proc);
4179 }
4180 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4181
4182 const struct rb_captured_block *captured = &block->as.captured;
4183 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4184 VALUE * const argv = cfp->sp - calling->argc;
4185 const int arg_size = ISEQ_BODY(iseq)->param.size;
4186
4187 int opt_pc;
4188 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4189 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4190 }
4191 else {
4192 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4193 }
4194
4195 cfp->sp = argv - 1; // -1 for the receiver
4196
4197 vm_push_frame(ec, iseq,
4198 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4199 calling->recv,
4200 VM_GUARDED_PREV_EP(captured->ep),
4201 (VALUE)cme,
4202 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4203 argv + arg_size,
4204 ISEQ_BODY(iseq)->local_table_size - arg_size,
4205 ISEQ_BODY(iseq)->stack_max);
4206
4207 return Qundef;
4208}
4209
4210static VALUE
4211vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4212{
4213 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4214
4215 VALUE *argv;
4216 int argc;
4217 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4218 if (UNLIKELY(calling->heap_argv)) {
4219 argv = RARRAY_PTR(calling->heap_argv);
4220 cfp->sp -= 2;
4221 }
4222 else {
4223 argc = calling->argc;
4224 argv = ALLOCA_N(VALUE, argc);
4225 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4226        cfp->sp -= argc + 1;
4227 }
4228
4229 return vm_call_bmethod_body(ec, calling, argv);
4230}
4231
4232static VALUE
4233vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4234{
4235 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4236
4237 const struct rb_callcache *cc = calling->cc;
4238 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4239 VALUE procv = cme->def->body.bmethod.proc;
4240 rb_proc_t *proc;
4241 GetProcPtr(procv, proc);
4242 const struct rb_block *block = &proc->block;
4243
4244 while (vm_block_type(block) == block_type_proc) {
4245 block = vm_proc_block(block->as.proc);
4246 }
4247 if (vm_block_type(block) == block_type_iseq) {
4248 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4249 return vm_call_iseq_bmethod(ec, cfp, calling);
4250 }
4251
4252 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4253 return vm_call_noniseq_bmethod(ec, cfp, calling);
4254}
4255
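/* Note (added): walk the ancestry of current_class to find the entry (class
 * or iclass) whose owner is target_owner; used to recover a defined_class
 * for aliased/complemented method entries. E.g. with
 * `class C; prepend M; end`, C's own methods live on an origin iclass, so
 * the walk starts from the covering class. */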
4256VALUE
4257rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4258{
4259 VALUE klass = current_class;
4260
4261    /* for a prepended Module, start the search from the covering class */
4262 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4263 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4264 klass = RBASIC_CLASS(klass);
4265 }
4266
4267 while (RTEST(klass)) {
4268 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4269 if (owner == target_owner) {
4270 return klass;
4271 }
4272 klass = RCLASS_SUPER(klass);
4273 }
4274
4275    return current_class; /* maybe a module_function */
4276}
4277
4278static const rb_callable_method_entry_t *
4279aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4280{
4281 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4282 const rb_callable_method_entry_t *cme;
4283
4284 if (orig_me->defined_class == 0) {
4285 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4286 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4287 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4288
4289 if (me->def->reference_count == 1) {
4290 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4291 }
4292 else {
4293            rb_method_definition_t *def =
4294                rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4295 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4296 }
4297 }
4298 else {
4299 cme = (const rb_callable_method_entry_t *)orig_me;
4300 }
4301
4302 VM_ASSERT(callable_method_entry_p(cme));
4303 return cme;
4304}
4305
4306const rb_callable_method_entry_t *
4307rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4308{
4309 return aliased_callable_method_entry(me);
4310}
4311
4312static VALUE
4313vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4314{
4315 calling->cc = &VM_CC_ON_STACK(Qundef,
4316 vm_call_general,
4317 {{0}},
4318 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4319
4320 return vm_call_method_each_type(ec, cfp, calling);
4321}
4322
4323static enum method_missing_reason
4324ci_missing_reason(const struct rb_callinfo *ci)
4325{
4326 enum method_missing_reason stat = MISSING_NOENTRY;
4327 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4328 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4329 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4330 return stat;
4331}
4332
4333static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4334
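/* Note (added): dispatch a call whose method name arrives as a Symbol or
 * String value (send, Symbol#to_proc, method_missing re-dispatch). If the
 * name is a String that is not interned anywhere, no such method can exist:
 * when method_missing is unredefined we raise NoMethodError without creating
 * a new Symbol ([Feature #5112]); otherwise the name is interned and handed
 * to method_missing. */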
4335static VALUE
4336vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4337 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4338{
4339 ASSUME(calling->argc >= 0);
4340
4341 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4342 int argc = calling->argc;
4343 VALUE recv = calling->recv;
4344 VALUE klass = CLASS_OF(recv);
4345 ID mid = rb_check_id(&symbol);
4346 flags |= VM_CALL_OPT_SEND;
4347
4348 if (UNLIKELY(! mid)) {
4349 mid = idMethodMissing;
4350 missing_reason = ci_missing_reason(ci);
4351 ec->method_missing_reason = missing_reason;
4352
4353 VALUE argv_ary;
4354 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4355 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4356 rb_ary_unshift(argv_ary, symbol);
4357
4358 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4359 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4360 VALUE exc = rb_make_no_method_exception(
4361 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4362
4363 rb_exc_raise(exc);
4364 }
4365 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4366 }
4367 else {
4368 /* E.g. when argc == 2
4369 *
4370 * | | | | TOPN
4371 * | | +------+
4372 * | | +---> | arg1 | 0
4373 * +------+ | +------+
4374 * | arg1 | -+ +-> | arg0 | 1
4375 * +------+ | +------+
4376 * | arg0 | ---+ | sym | 2
4377 * +------+ +------+
4378 * | recv | | recv | 3
4379 * --+------+--------+------+------
4380 */
4381 int i = argc;
4382 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4383 INC_SP(1);
4384 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4385 argc = ++calling->argc;
4386
4387 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4388 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4389 TOPN(i) = symbol;
4390 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4391 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4392 VALUE exc = rb_make_no_method_exception(
4393 rb_eNoMethodError, 0, recv, argc, argv, priv);
4394
4395 rb_exc_raise(exc);
4396 }
4397 else {
4398 TOPN(i) = rb_str_intern(symbol);
4399 }
4400 }
4401 }
4402
4403 struct rb_forwarding_call_data new_fcd = {
4404 .cd = {
4405 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4406 .cc = NULL,
4407 },
4408 .caller_ci = NULL,
4409 };
4410
4411 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4412 calling->cd = &new_fcd.cd;
4413 }
4414 else {
4415 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4416 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4417 new_fcd.caller_ci = caller_ci;
4418 calling->cd = (struct rb_call_data *)&new_fcd;
4419 }
4420 calling->cc = &VM_CC_ON_STACK(klass,
4421 vm_call_general,
4422 { .method_missing_reason = missing_reason },
4423 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4424
4425 if (flags & VM_CALL_FCALL) {
4426 return vm_call_method(ec, reg_cfp, calling);
4427 }
4428
4429 const struct rb_callcache *cc = calling->cc;
4430 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4431
4432 if (vm_cc_cme(cc) != NULL) {
4433 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4434 case METHOD_VISI_PUBLIC: /* likely */
4435 return vm_call_method_each_type(ec, reg_cfp, calling);
4436 case METHOD_VISI_PRIVATE:
4437 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4438 break;
4439 case METHOD_VISI_PROTECTED:
4440 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4441 break;
4442 default:
4443 VM_UNREACHABLE(vm_call_method);
4444 }
4445 return vm_call_method_missing(ec, reg_cfp, calling);
4446 }
4447
4448 return vm_call_method_nome(ec, reg_cfp, calling);
4449}
4450
4451static VALUE
4452vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4453{
4454 const struct rb_callinfo *ci = calling->cd->ci;
4455 int i;
4456 VALUE sym;
4457
4458 i = calling->argc - 1;
4459
4460 if (calling->argc == 0) {
4461 rb_raise(rb_eArgError, "no method name given");
4462 }
4463
4464 sym = TOPN(i);
4465 /* E.g. when i == 2
4466 *
4467 * | | | | TOPN
4468 * +------+ | |
4469 * | arg1 | ---+ | | 0
4470 * +------+ | +------+
4471 * | arg0 | -+ +-> | arg1 | 1
4472 * +------+ | +------+
4473 * | sym | +---> | arg0 | 2
4474 * +------+ +------+
4475 * | recv | | recv | 3
4476 * --+------+--------+------+------
4477 */
4478 /* shift arguments */
4479 if (i > 0) {
4480 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4481 }
4482 calling->argc -= 1;
4483 DEC_SP(1);
4484
4485 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4486}
4487
4488static VALUE
4489vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4490{
4491 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4492 const struct rb_callinfo *ci = calling->cd->ci;
4493 int flags = VM_CALL_FCALL;
4494 VALUE sym;
4495
4496 VALUE argv_ary;
4497 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4498 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4499 sym = rb_ary_shift(argv_ary);
4500 flags |= VM_CALL_ARGS_SPLAT;
4501 if (calling->kw_splat) {
4502 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4503 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4504 calling->kw_splat = 0;
4505 }
4506 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4507 }
4508
4509 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4510 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4511}
4512
4513static VALUE
4514vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4515{
4516 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4517 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4518}
4519
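/* Note (added): `obj.send(:m, ...)` strips the method-name argument and
 * re-dispatches. The complex path is taken when the name is buried under
 * splat/kwsplat or kwarg handling (or under call forwarding); the simple
 * path just shifts the first argument off the stack and dispatches the
 * call as an FCALL. */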
4520static VALUE
4521vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4522{
4523 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4524
4525 const struct rb_callinfo *ci = calling->cd->ci;
4526 int flags = vm_ci_flag(ci);
4527
4528 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4529 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4530 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4531 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4532 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4533 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4534 }
4535
4536 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4537 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4538}
4539
4540static VALUE
4541vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4542 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4543{
4544 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4545
4546 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4547 unsigned int argc, flag;
4548
4549 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4550 argc = ++calling->argc;
4551
4552 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4553 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4554 vm_check_canary(ec, reg_cfp->sp);
4555 if (argc > 1) {
4556 MEMMOVE(argv+1, argv, VALUE, argc-1);
4557 }
4558 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4559 INC_SP(1);
4560
4561 ec->method_missing_reason = reason;
4562
4563 struct rb_forwarding_call_data new_fcd = {
4564 .cd = {
4565 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4566 .cc = NULL,
4567 },
4568 .caller_ci = NULL,
4569 };
4570
4571 if (!(flag & VM_CALL_FORWARDING)) {
4572 calling->cd = &new_fcd.cd;
4573 }
4574 else {
4575 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4576 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4577 new_fcd.caller_ci = caller_ci;
4578 calling->cd = (struct rb_call_data *)&new_fcd;
4579 }
4580
4581 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4582 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4583 return vm_call_method(ec, reg_cfp, calling);
4584}
4585
4586static VALUE
4587vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4588{
4589 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4590}
4591
4592static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4593static VALUE
4594vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4595{
4596 klass = RCLASS_SUPER(klass);
4597
4598 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4599 if (cme == NULL) {
4600 return vm_call_method_nome(ec, cfp, calling);
4601 }
4602 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4603 cme->def->body.refined.orig_me) {
4604 cme = refined_method_callable_without_refinement(cme);
4605 }
4606
4607 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4608
4609 return vm_call_method_each_type(ec, cfp, calling);
4610}
4611
4612static inline VALUE
4613find_refinement(VALUE refinements, VALUE klass)
4614{
4615 if (NIL_P(refinements)) {
4616 return Qnil;
4617 }
4618 return rb_hash_lookup(refinements, klass);
4619}
4620
4621PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4622static rb_control_frame_t *
4623current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4624{
4625 rb_control_frame_t *top_cfp = cfp;
4626
4627 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4628 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4629
4630 do {
4631 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4632 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4633 /* TODO: orphan block */
4634 return top_cfp;
4635 }
4636 } while (cfp->iseq != local_iseq);
4637 }
4638 return cfp;
4639}
4640
4641static const rb_callable_method_entry_t *
4642refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4643{
4644 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4645 const rb_callable_method_entry_t *cme;
4646
4647 if (orig_me->defined_class == 0) {
4648        cme = NULL;
4649        rb_notimplement();
4650    }
4651 else {
4652 cme = (const rb_callable_method_entry_t *)orig_me;
4653 }
4654
4655 VM_ASSERT(callable_method_entry_p(cme));
4656
4657 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4658 cme = NULL;
4659 }
4660
4661 return cme;
4662}
4663
4664static const rb_callable_method_entry_t *
4665search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4666{
4667 ID mid = vm_ci_mid(calling->cd->ci);
4668 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4669 const struct rb_callcache * const cc = calling->cc;
4670 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4671
4672 for (; cref; cref = CREF_NEXT(cref)) {
4673 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4674 if (NIL_P(refinement)) continue;
4675
4676 const rb_callable_method_entry_t *const ref_me =
4677 rb_callable_method_entry(refinement, mid);
4678
4679 if (ref_me) {
4680 if (vm_cc_call(cc) == vm_call_super_method) {
4681 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4682 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4683 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4684 continue;
4685 }
4686 }
4687
4688 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4689 cme->def != ref_me->def) {
4690 cme = ref_me;
4691 }
4692 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4693 return cme;
4694 }
4695 }
4696 else {
4697 return NULL;
4698 }
4699 }
4700
4701 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4702 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4703 }
4704 else {
4705 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4706 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4707 return cme;
4708 }
4709}
4710
4711static VALUE
4712vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4713{
4714 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4715
4716 if (ref_cme) {
4717 if (calling->cd->cc) {
4718 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4719 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4720 return vm_call_method(ec, cfp, calling);
4721 }
4722 else {
4723 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4724            calling->cc = ref_cc;
4725 return vm_call_method(ec, cfp, calling);
4726 }
4727 }
4728 else {
4729 return vm_call_method_nome(ec, cfp, calling);
4730 }
4731}
4732
4733static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4734
4735NOINLINE(static VALUE
4736 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4737 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4738
4739static VALUE
4740vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4741 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4742{
4743 int argc = calling->argc;
4744
4745 /* remove self */
4746 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4747 DEC_SP(1);
4748
4749 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4750}
4751
4752static VALUE
4753vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4754{
4755 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4756
4757 const struct rb_callinfo *ci = calling->cd->ci;
4758 VALUE procval = calling->recv;
4759 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4760}
4761
4762static VALUE
4763vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4764{
4765 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4766
4767 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4768 const struct rb_callinfo *ci = calling->cd->ci;
4769
4770 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4771 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4772 }
4773 else {
4774 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4775 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4776 return vm_call_general(ec, reg_cfp, calling);
4777 }
4778}
4779
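/* Note (added): optimized Struct member reader, e.g.:
 *
 *     S = Struct.new(:a, :b)
 *     S.new(1, 2).b   # reads member index 1 directly, no C frame pushed
 *
 * The member index is stored in the optimized method definition. */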
4780static VALUE
4781vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4782{
4783 VALUE recv = calling->recv;
4784
4785 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4786 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4787 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4788
4789 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4790 return internal_RSTRUCT_GET(recv, off);
4791}
4792
4793static VALUE
4794vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4795{
4796 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4797
4798 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4799 reg_cfp->sp -= 1;
4800 return ret;
4801}
4802
4803static VALUE
4804vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4805{
4806 VALUE recv = calling->recv;
4807
4808 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4809 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4810 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4811
4812 rb_check_frozen(recv);
4813
4814 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4815 internal_RSTRUCT_SET(recv, off, val);
4816
4817 return val;
4818}
4819
4820static VALUE
4821vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4822{
4823 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4824
4825 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4826 reg_cfp->sp -= 2;
4827 return ret;
4828}
4829
4830NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4831 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4832
4833#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4834 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4835 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4836 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4837 var = func; \
4838 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4839 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4840 } \
4841 else { \
4842 nohook; \
4843 var = func; \
4844 }
4845
4846static VALUE
4847vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4848 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4849{
4850 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4851 case OPTIMIZED_METHOD_TYPE_SEND:
4852 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4853 return vm_call_opt_send(ec, cfp, calling);
4854 case OPTIMIZED_METHOD_TYPE_CALL:
4855 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4856 return vm_call_opt_call(ec, cfp, calling);
4857 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4858 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4859 return vm_call_opt_block_call(ec, cfp, calling);
4860 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4861 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4862 rb_check_arity(calling->argc, 0, 0);
4863
4864 VALUE v;
4865 VM_CALL_METHOD_ATTR(v,
4866 vm_call_opt_struct_aref(ec, cfp, calling),
4867 set_vm_cc_ivar(cc); \
4868 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4869 return v;
4870 }
4871 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4872 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4873 rb_check_arity(calling->argc, 1, 1);
4874
4875 VALUE v;
4876 VM_CALL_METHOD_ATTR(v,
4877 vm_call_opt_struct_aset(ec, cfp, calling),
4878 set_vm_cc_ivar(cc); \
4879 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4880 return v;
4881 }
4882 default:
4883 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4884 }
4885}
4886
4887static VALUE
4888vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4889{
4890 const struct rb_callinfo *ci = calling->cd->ci;
4891 const struct rb_callcache *cc = calling->cc;
4892 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4893 VALUE v;
4894
4895 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4896
4897 switch (cme->def->type) {
4898 case VM_METHOD_TYPE_ISEQ:
4899 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4900 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4901 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4902 }
4903 else {
4904 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4905 return vm_call_iseq_setup(ec, cfp, calling);
4906 }
4907
4908 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4909 case VM_METHOD_TYPE_CFUNC:
4910 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4911 return vm_call_cfunc(ec, cfp, calling);
4912
4913 case VM_METHOD_TYPE_ATTRSET:
4914 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4915
4916 rb_check_arity(calling->argc, 1, 1);
4917
4918 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4919
4920 if (vm_cc_markable(cc)) {
4921 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4922 VM_CALL_METHOD_ATTR(v,
4923 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4924 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4925 }
4926 else {
4927 cc = &((struct rb_callcache) {
4928 .flags = T_IMEMO |
4929 (imemo_callcache << FL_USHIFT) |
4930 VM_CALLCACHE_UNMARKABLE |
4931 VM_CALLCACHE_ON_STACK,
4932 .klass = cc->klass,
4933 .cme_ = cc->cme_,
4934 .call_ = cc->call_,
4935 .aux_ = {
4936 .attr = {
4937 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4938 }
4939 },
4940 });
4941
4942 VM_CALL_METHOD_ATTR(v,
4943 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4944 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4945 }
4946 return v;
4947
4948 case VM_METHOD_TYPE_IVAR:
4949 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4950 rb_check_arity(calling->argc, 0, 0);
4951 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4952 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4953 VM_CALL_METHOD_ATTR(v,
4954 vm_call_ivar(ec, cfp, calling),
4955 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4956 return v;
4957
4958 case VM_METHOD_TYPE_MISSING:
4959 vm_cc_method_missing_reason_set(cc, 0);
4960 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4961 return vm_call_method_missing(ec, cfp, calling);
4962
4963 case VM_METHOD_TYPE_BMETHOD:
4964 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4965 return vm_call_bmethod(ec, cfp, calling);
4966
4967 case VM_METHOD_TYPE_ALIAS:
4968 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4969 return vm_call_alias(ec, cfp, calling);
4970
4971 case VM_METHOD_TYPE_OPTIMIZED:
4972 return vm_call_optimized(ec, cfp, calling, ci, cc);
4973
4974 case VM_METHOD_TYPE_UNDEF:
4975 break;
4976
4977 case VM_METHOD_TYPE_ZSUPER:
4978 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4979
4980 case VM_METHOD_TYPE_REFINED:
4981 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4982 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4983 return vm_call_refined(ec, cfp, calling);
4984 }
4985
4986 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4987}
4988
4989NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4990
4991static VALUE
4992vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4993{
4994 /* method missing */
4995 const struct rb_callinfo *ci = calling->cd->ci;
4996 const int stat = ci_missing_reason(ci);
4997
4998 if (vm_ci_mid(ci) == idMethodMissing) {
4999 if (UNLIKELY(calling->heap_argv)) {
5000 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
5001 }
5002 else {
5003 rb_control_frame_t *reg_cfp = cfp;
5004 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
5005 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
5006 }
5007 }
5008 else {
5009 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
5010 }
5011}
5012
5013/* Protected method calls and super invocations need to check that the receiver
5014 * (self, in the case of super) inherits from the module on which the method is
5015 * defined. For refinements, the original class should be considered, not the
5016 * refinement.
5017 */
5018static VALUE
5019vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
5020{
5021 VALUE defined_class = me->defined_class;
5022 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5023 return NIL_P(refined_class) ? defined_class : refined_class;
5024}
5025
5026static inline VALUE
5027vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
5028{
5029 const struct rb_callinfo *ci = calling->cd->ci;
5030 const struct rb_callcache *cc = calling->cc;
5031
5032 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
5033
5034 if (vm_cc_cme(cc) != NULL) {
5035 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
5036 case METHOD_VISI_PUBLIC: /* likely */
5037 return vm_call_method_each_type(ec, cfp, calling);
5038
5039 case METHOD_VISI_PRIVATE:
5040 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
5041 enum method_missing_reason stat = MISSING_PRIVATE;
5042 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
5043
5044 vm_cc_method_missing_reason_set(cc, stat);
5045 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
5046 return vm_call_method_missing(ec, cfp, calling);
5047 }
5048 return vm_call_method_each_type(ec, cfp, calling);
5049
5050 case METHOD_VISI_PROTECTED:
5051 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
5052 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
5053 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
5054 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
5055 return vm_call_method_missing(ec, cfp, calling);
5056 }
5057 else {
5058                /* cache the method info in a dummy on-stack cc */
5059 VM_ASSERT(vm_cc_cme(cc) != NULL);
5060 struct rb_callcache cc_on_stack = *cc;
5061 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
5062 calling->cc = &cc_on_stack;
5063 return vm_call_method_each_type(ec, cfp, calling);
5064 }
5065 }
5066 return vm_call_method_each_type(ec, cfp, calling);
5067
5068 default:
5069 rb_bug("unreachable");
5070 }
5071 }
5072 else {
5073 return vm_call_method_nome(ec, cfp, calling);
5074 }
5075}
5076
5077static VALUE
5078vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5079{
5080 RB_DEBUG_COUNTER_INC(ccf_general);
5081 return vm_call_method(ec, reg_cfp, calling);
5082}
5083
5084void
5085rb_vm_cc_general(const struct rb_callcache *cc)
5086{
5087 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
5088 VM_ASSERT(cc != vm_cc_empty());
5089
5090 *(vm_call_handler *)&cc->call_ = vm_call_general;
5091}
5092
5093static VALUE
5094vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5095{
5096 RB_DEBUG_COUNTER_INC(ccf_super_method);
5097
5098    // This check is introduced to make this function distinct from `vm_call_general`, because some
5099    // compilers (VC, as we found) can merge identical functions so that their addresses become the same.
5100    // The address of `vm_call_super_method` is used in `search_refined_method`, so it must stay different.
5101 if (ec == NULL) rb_bug("unreachable");
5102
5103 /* this check is required to distinguish with other functions. */
5104 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
5105 return vm_call_method(ec, reg_cfp, calling);
5106}
5107
5108/* super */
5109
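/* Note (added): determine where `super` resumes the method search: a
 * refinement's iclass is normalized back to the refinement module, and the
 * lookup continues at the superclass of the defined class's origin, so a
 * class's prepended modules are not searched again. */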
5110static inline VALUE
5111vm_search_normal_superclass(VALUE klass)
5112{
5113 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5114 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5115 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5116 klass = RBASIC(klass)->klass;
5117 }
5118 klass = RCLASS_ORIGIN(klass);
5119 return RCLASS_SUPER(klass);
5120}
5121
5122NORETURN(static void vm_super_outside(void));
5123
5124static void
5125vm_super_outside(void)
5126{
5127 rb_raise(rb_eNoMethodError, "super called outside of method");
5128}
5129
5130static const struct rb_callcache *
5131empty_cc_for_super(void)
5132{
5133 return &vm_empty_cc_for_super;
5134}
5135
5136static const struct rb_callcache *
5137vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5138{
5139 VALUE current_defined_class;
5140 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5141
5142 if (!me) {
5143 vm_super_outside();
5144 }
5145
5146 current_defined_class = vm_defined_class_for_protected_call(me);
5147
5148 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5149 reg_cfp->iseq != method_entry_iseqptr(me) &&
5150 !rb_obj_is_kind_of(recv, current_defined_class)) {
5151 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5152 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5153
5154 if (m) { /* not bound UnboundMethod */
5155 rb_raise(rb_eTypeError,
5156 "self has wrong type to call super in this context: "
5157 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5158 rb_obj_class(recv), m);
5159 }
5160 }
5161
5162 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5163 rb_raise(rb_eRuntimeError,
5164 "implicit argument passing of super from method defined"
5165 " by define_method() is not supported."
5166 " Specify all arguments explicitly.");
5167 }
5168
5169 ID mid = me->def->original_id;
5170
5171 if (!vm_ci_markable(cd->ci)) {
5172 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5173 }
5174 else {
5175 // update iseq. really? (TODO)
5176 cd->ci = vm_ci_new_runtime(mid,
5177 vm_ci_flag(cd->ci),
5178 vm_ci_argc(cd->ci),
5179 vm_ci_kwarg(cd->ci));
5180
5181 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5182 }
5183
5184 const struct rb_callcache *cc;
5185
5186 VALUE klass = vm_search_normal_superclass(me->defined_class);
5187
5188 if (!klass) {
5189 /* bound instance method of module */
5190 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5191 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5192 }
5193 else {
5194 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5195 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5196
5197 // define_method can cache for different method id
5198 if (cached_cme == NULL) {
5199 // empty_cc_for_super is not markable object
5200 cd->cc = empty_cc_for_super();
5201 }
5202 else if (cached_cme->called_id != mid) {
5203 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5204 if (cme) {
5205 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5206 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5207 }
5208 else {
5209 cd->cc = cc = empty_cc_for_super();
5210 }
5211 }
5212 else {
5213 switch (cached_cme->def->type) {
5214 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5215 case VM_METHOD_TYPE_REFINED:
5216 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5217 case VM_METHOD_TYPE_ATTRSET:
5218 case VM_METHOD_TYPE_IVAR:
5219 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5220 break;
5221 default:
5222 break; // use fastpath
5223 }
5224 }
5225 }
5226
5227 VM_ASSERT((vm_cc_cme(cc), true));
5228
5229 return cc;
5230}
5231
5232/* yield */
5233
5234static inline int
5235block_proc_is_lambda(const VALUE procval)
5236{
5237 rb_proc_t *proc;
5238
5239 if (procval) {
5240 GetProcPtr(procval, proc);
5241 return proc->is_lambda;
5242 }
5243 else {
5244 return 0;
5245 }
5246}
5247
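/* Note (added): yield to an ifunc block, i.e. a block whose body is a C
 * function (e.g. one created via rb_block_call()). Mirroring proc
 * (non-lambda) semantics, a single yielded value is passed through as-is
 * rather than wrapped in an Array, and the frame is flagged as BMETHOD
 * when a method entry is given. */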
5248static VALUE
5249vm_yield_with_cfunc(rb_execution_context_t *ec,
5250 const struct rb_captured_block *captured,
5251 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5252                    const rb_callable_method_entry_t *me)
5253{
5254 int is_lambda = FALSE; /* TODO */
5255 VALUE val, arg, blockarg;
5256 int frame_flag;
5257 const struct vm_ifunc *ifunc = captured->code.ifunc;
5258
5259 if (is_lambda) {
5260 arg = rb_ary_new4(argc, argv);
5261 }
5262 else if (argc == 0) {
5263 arg = Qnil;
5264 }
5265 else {
5266 arg = argv[0];
5267 }
5268
5269 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5270
5271 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5272 if (kw_splat) {
5273 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5274 }
5275
5276 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5277 frame_flag,
5278 self,
5279 VM_GUARDED_PREV_EP(captured->ep),
5280 (VALUE)me,
5281 0, ec->cfp->sp, 0, 0);
5282 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5283 rb_vm_pop_frame(ec);
5284
5285 return val;
5286}
5287
5288VALUE
5289rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5290{
5291 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5292}
5293
5294static VALUE
5295vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5296{
5297 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5298}
5299
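/* Note (added): proc-style arg0 auto-splat. When a single Array is yielded
 * to a block that takes multiple parameters, its elements are spread, e.g.
 *
 *     [[1, 2]].each { |a, b| }   # binds a = 1, b = 2
 */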
5300static inline int
5301vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5302{
5303 int i;
5304 long len = RARRAY_LEN(ary);
5305
5306 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5307
5308 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5309 argv[i] = RARRAY_AREF(ary, i);
5310 }
5311
5312 return i;
5313}
5314
5315static inline VALUE
5316vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5317{
5318 VALUE ary, arg0 = argv[0];
5319 ary = rb_check_array_type(arg0);
5320#if 0
5321 argv[0] = arg0;
5322#else
5323 VM_ASSERT(argv[0] == arg0);
5324#endif
5325 return ary;
5326}
5327
5328static int
5329vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5330{
5331 if (rb_simple_iseq_p(iseq)) {
5332 rb_control_frame_t *cfp = ec->cfp;
5333 VALUE arg0;
5334
5335 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5336
5337 if (arg_setup_type == arg_setup_block &&
5338 calling->argc == 1 &&
5339 ISEQ_BODY(iseq)->param.flags.has_lead &&
5340 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5341 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5342 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5343 }
5344
5345 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5346 if (arg_setup_type == arg_setup_block) {
5347 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5348 int i;
5349 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5350 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5351 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5352 }
5353 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5354 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5355 }
5356 }
5357 else {
5358 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5359 }
5360 }
5361
5362 return 0;
5363 }
5364 else {
5365 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5366 }
5367}
5368
5369static int
5370vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5371{
5372 struct rb_calling_info calling_entry, *calling;
5373
5374 calling = &calling_entry;
5375 calling->argc = argc;
5376 calling->block_handler = block_handler;
5377 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5378 calling->recv = Qundef;
5379 calling->heap_argv = 0;
5380 calling->cc = NULL;
5381 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5382
5383 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5384}
5385
5386/* ruby iseq -> ruby block */
5387
5388static VALUE
5389vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5390 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5391 bool is_lambda, VALUE block_handler)
5392{
5393 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5394 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5395 const int arg_size = ISEQ_BODY(iseq)->param.size;
5396 VALUE * const rsp = GET_SP() - calling->argc;
5397 VALUE * const argv = rsp;
5398 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5399 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5400
5401 SET_SP(rsp);
5402
5403 vm_push_frame(ec, iseq,
5404 frame_flag,
5405 captured->self,
5406 VM_GUARDED_PREV_EP(captured->ep), 0,
5407 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5408 rsp + arg_size,
5409 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5410
5411 return Qundef;
5412}
5413
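/* Note (added): yield to a Symbol-based block handler, e.g.
 * `%w[a b].map(&:upcase)`: the first yielded value becomes the receiver and
 * the remaining values become arguments of the named method, dispatched via
 * vm_call_symbol. */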
5414static VALUE
5415vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5416 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5417 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5418{
5419 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5420 int flags = vm_ci_flag(ci);
5421
5422 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5423 ((calling->argc == 0) ||
5424 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5425 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5426 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5427 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5428 flags = 0;
5429 if (UNLIKELY(calling->heap_argv)) {
5430#if VM_ARGC_STACK_MAX < 0
5431 if (RARRAY_LEN(calling->heap_argv) < 1) {
5432 rb_raise(rb_eArgError, "no receiver given");
5433 }
5434#endif
5435 calling->recv = rb_ary_shift(calling->heap_argv);
5436 // Modify stack to avoid cfp consistency error
5437 reg_cfp->sp++;
5438 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5439 reg_cfp->sp[-2] = calling->recv;
5440 flags |= VM_CALL_ARGS_SPLAT;
5441 }
5442 else {
5443 if (calling->argc < 1) {
5444 rb_raise(rb_eArgError, "no receiver given");
5445 }
5446 calling->recv = TOPN(--calling->argc);
5447 }
5448 if (calling->kw_splat) {
5449 flags |= VM_CALL_KW_SPLAT;
5450 }
5451 }
5452 else {
5453 if (calling->argc < 1) {
5454 rb_raise(rb_eArgError, "no receiver given");
5455 }
5456 calling->recv = TOPN(--calling->argc);
5457 }
5458
5459 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5460}
5461
5462static VALUE
5463vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5464 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5465 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5466{
5467 VALUE val;
5468 int argc;
5469 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5470 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5471 argc = calling->argc;
5472 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5473 POPN(argc); /* TODO: should put before C/yield? */
5474 return val;
5475}
5476
5477static VALUE
5478vm_proc_to_block_handler(VALUE procval)
5479{
5480 const struct rb_block *block = vm_proc_block(procval);
5481
5482 switch (vm_block_type(block)) {
5483 case block_type_iseq:
5484 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5485 case block_type_ifunc:
5486 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5487 case block_type_symbol:
5488 return VM_BH_FROM_SYMBOL(block->as.symbol);
5489 case block_type_proc:
5490 return VM_BH_FROM_PROC(block->as.proc);
5491 }
5492 VM_UNREACHABLE(vm_yield_with_proc);
5493 return Qundef;
5494}
5495
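/* A proc-type block handler may itself wrap another Proc (e.g. a Proc
 * forwarded with `&b` through several frames). Unwrap until an
 * iseq/ifunc/symbol handler is reached, refreshing is_lambda from each
 * wrapped Proc along the way. */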
5496static VALUE
5497vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5498 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5499 bool is_lambda, VALUE block_handler)
5500{
5501 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5502 VALUE proc = VM_BH_TO_PROC(block_handler);
5503 is_lambda = block_proc_is_lambda(proc);
5504 block_handler = vm_proc_to_block_handler(proc);
5505 }
5506
5507 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5508}
5509
5510static inline VALUE
5511vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5512 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5513 bool is_lambda, VALUE block_handler)
5514{
5515 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5516 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5517 bool is_lambda, VALUE block_handler);
5518
5519 switch (vm_block_handler_type(block_handler)) {
5520 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5521 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5522 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5523 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5524 default: rb_bug("vm_invoke_block: unreachable");
5525 }
5526
5527 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5528}
5529
5530static VALUE
5531vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5532{
5533 const rb_execution_context_t *ec = GET_EC();
5534 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5535 struct rb_captured_block *captured;
5536
5537 if (cfp == 0) {
5538 rb_bug("vm_make_proc_with_iseq: unreachable");
5539 }
5540
5541 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5542 captured->code.iseq = blockiseq;
5543
5544 return rb_vm_make_proc(ec, captured, rb_cProc);
5545}
5546
5547static VALUE
5548vm_once_exec(VALUE iseq)
5549{
5550 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5551 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5552}
5553
5554static VALUE
5555vm_once_clear(VALUE data)
5556{
5557 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5558 is->once.running_thread = NULL;
5559 return Qnil;
5560}
5561
5562/* defined insn */
5563
5564static bool
5565check_respond_to_missing(VALUE obj, VALUE v)
5566{
5567 VALUE args[2];
5568 VALUE r;
5569
5570 args[0] = obj; args[1] = Qfalse;
5571 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5572 if (!UNDEF_P(r) && RTEST(r)) {
5573 return true;
5574 }
5575 else {
5576 return false;
5577 }
5578}
5579
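/* Backend of the `defined?` keyword. The compiler encodes the kind of
 * expression in op_type; roughly (illustrative, not exhaustive):
 *
 *   defined?(@ivar)  -> DEFINED_IVAR     defined?(yield) -> DEFINED_YIELD
 *   defined?($gvar)  -> DEFINED_GVAR     defined?(super) -> DEFINED_ZSUPER
 *   defined?(@@cvar) -> DEFINED_CVAR     defined?($1)    -> DEFINED_REF
 *   defined?(Const)  -> DEFINED_CONST    defined?(obj.m) -> DEFINED_METHOD
 */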
5580static bool
5581vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5582{
5583 VALUE klass;
5584 enum defined_type type = (enum defined_type)op_type;
5585
5586 switch (type) {
5587 case DEFINED_IVAR:
5588 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5589 break;
5590 case DEFINED_GVAR:
5591 return rb_gvar_defined(SYM2ID(obj));
5592 break;
5593 case DEFINED_CVAR: {
5594 const rb_cref_t *cref = vm_get_cref(GET_EP());
5595 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5596 return rb_cvar_defined(klass, SYM2ID(obj));
5597 break;
5598 }
5599 case DEFINED_CONST:
5600 case DEFINED_CONST_FROM: {
5601 bool allow_nil = type == DEFINED_CONST;
5602 klass = v;
5603 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5604 break;
5605 }
5606 case DEFINED_FUNC:
5607 klass = CLASS_OF(v);
5608 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5609 break;
5610 case DEFINED_METHOD:{
5611 VALUE klass = CLASS_OF(v);
5612 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5613
5614 if (me) {
5615 switch (METHOD_ENTRY_VISI(me)) {
5616 case METHOD_VISI_PRIVATE:
5617 break;
5618 case METHOD_VISI_PROTECTED:
5619 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5620 break;
5621 }
5622 case METHOD_VISI_PUBLIC:
5623 return true;
5624 break;
5625 default:
5626 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5627 }
5628 }
5629 else {
5630 return check_respond_to_missing(obj, v);
5631 }
5632 break;
5633 }
5634 case DEFINED_YIELD:
5635 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5636 return true;
5637 }
5638 break;
5639 case DEFINED_ZSUPER:
5640 {
5641 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5642
5643 if (me) {
5644 VALUE klass = vm_search_normal_superclass(me->defined_class);
5645 if (!klass) return false;
5646
5647 ID id = me->def->original_id;
5648
5649 return rb_method_boundp(klass, id, 0);
5650 }
5651 }
5652 break;
5653 case DEFINED_REF:
5654 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5655 default:
5656 rb_bug("unimplemented defined? type (VM)");
5657 break;
5658 }
5659
5660 return false;
5661}
5662
5663bool
5664rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5665{
5666 return vm_defined(ec, reg_cfp, op_type, obj, v);
5667}
5668
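/* Walk `lv` static links outward from reg_ep, one GET_PREV_EP() hop per
 * lexical scope. For example, reading a local captured two blocks up
 * (getlocal with level 2) takes two hops. */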
5669static const VALUE *
5670vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5671{
5672 rb_num_t i;
5673 const VALUE *ep = reg_ep;
5674 for (i = 0; i < lv; i++) {
5675 ep = GET_PREV_EP(ep);
5676 }
5677 return ep;
5678}
5679
5680static VALUE
5681vm_get_special_object(const VALUE *const reg_ep,
5682 enum vm_special_object_type type)
5683{
5684 switch (type) {
5685 case VM_SPECIAL_OBJECT_VMCORE:
5686 return rb_mRubyVMFrozenCore;
5687 case VM_SPECIAL_OBJECT_CBASE:
5688 return vm_get_cbase(reg_ep);
5689 case VM_SPECIAL_OBJECT_CONST_BASE:
5690 return vm_get_const_base(reg_ep);
5691 default:
5692 rb_bug("putspecialobject insn: unknown value_type %d", type);
5693 }
5694}
5695
5696// ZJIT-compiled code uses this C implementation and can only call a
5697// non-static function, hence this exported wrapper.
5698VALUE
5699rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5700{
5701 return vm_get_special_object(reg_ep, type);
5702}
5703
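/* concatarray: implements `[*a, *b]`-style concatenation. A non-Array
 * operand that does not respond to #to_a is wrapped instead, e.g.
 *
 *     a = 1; [*a, *[2, 3]]  #=> [1, 2, 3]
 *
 * ary1 is dup'ed when #to_a returned the receiver itself, so the original
 * array is never mutated. */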
5704static VALUE
5705vm_concat_array(VALUE ary1, VALUE ary2st)
5706{
5707 const VALUE ary2 = ary2st;
5708 VALUE tmp1 = rb_check_to_array(ary1);
5709 VALUE tmp2 = rb_check_to_array(ary2);
5710
5711 if (NIL_P(tmp1)) {
5712 tmp1 = rb_ary_new3(1, ary1);
5713 }
5714 if (tmp1 == ary1) {
5715 tmp1 = rb_ary_dup(ary1);
5716 }
5717
5718 if (NIL_P(tmp2)) {
5719 return rb_ary_push(tmp1, ary2);
5720 }
5721 else {
5722 return rb_ary_concat(tmp1, tmp2);
5723 }
5724}
5725
5726static VALUE
5727vm_concat_to_array(VALUE ary1, VALUE ary2st)
5728{
5729 /* ary1 must be a newly created array */
5730 const VALUE ary2 = ary2st;
5731
5732 if (NIL_P(ary2)) return ary1;
5733
5734 VALUE tmp2 = rb_check_to_array(ary2);
5735
5736 if (NIL_P(tmp2)) {
5737 return rb_ary_push(ary1, ary2);
5738 }
5739 else {
5740 return rb_ary_concat(ary1, tmp2);
5741 }
5742}
5743
5744// YJIT-compiled code uses this C implementation and can only call a
5745// non-static function, hence this exported wrapper.
5746VALUE
5747rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5748{
5749 return vm_concat_array(ary1, ary2st);
5750}
5751
5752VALUE
5753rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5754{
5755 return vm_concat_to_array(ary1, ary2st);
5756}
5757
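/* splatarray: converts the operand of a `*obj` splat into an Array via
 * #to_a, wrapping non-convertible objects and turning nil into an empty
 * array. `flag` requests a fresh mutable copy, for call sites that may
 * go on to mutate the result. */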
5758static VALUE
5759vm_splat_array(VALUE flag, VALUE ary)
5760{
5761 if (NIL_P(ary)) {
5762 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5763 }
5764 VALUE tmp = rb_check_to_array(ary);
5765 if (NIL_P(tmp)) {
5766 return rb_ary_new3(1, ary);
5767 }
5768 else if (RTEST(flag)) {
5769 return rb_ary_dup(tmp);
5770 }
5771 else {
5772 return tmp;
5773 }
5774}
5775
5776// YJIT-compiled code uses this C implementation and can only call a
5777// non-static function, hence this exported wrapper.
5778VALUE
5779rb_vm_splat_array(VALUE flag, VALUE ary)
5780{
5781 return vm_splat_array(flag, ary);
5782}
5783
5784static VALUE
5785vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5786{
5787 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5788
5789 if (flag & VM_CHECKMATCH_ARRAY) {
5790 long i;
5791 const long n = RARRAY_LEN(pattern);
5792
5793 for (i = 0; i < n; i++) {
5794 VALUE v = RARRAY_AREF(pattern, i);
5795 VALUE c = check_match(ec, v, target, type);
5796
5797 if (RTEST(c)) {
5798 return c;
5799 }
5800 }
5801 return Qfalse;
5802 }
5803 else {
5804 return check_match(ec, pattern, target, type);
5805 }
5806}
5807
5808VALUE
5809rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5810{
5811 return vm_check_match(ec, target, pattern, flag);
5812}
5813
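/* checkkeyword: kw_bits is either a Fixnum bitmap (one bit per optional
 * keyword, up to KW_SPECIFIED_BITS_MAX) or a Hash for larger keyword sets.
 * Returns Qtrue when the keyword was NOT supplied by the caller, i.e. when
 * the compiled default-value expression must be evaluated. */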
5814static VALUE
5815vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5816{
5817 const VALUE kw_bits = *(ep - bits);
5818
5819 if (FIXNUM_P(kw_bits)) {
5820 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5821 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5822 return Qfalse;
5823 }
5824 else {
5825 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5826 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5827 }
5828 return Qtrue;
5829}
5830
5831static void
5832vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5833{
5834 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5835 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5836 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5837 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5838
5839 switch (flag) {
5840 case RUBY_EVENT_CALL:
5841 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5842 return;
5843 case RUBY_EVENT_C_CALL:
5844 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5845 return;
5846 case RUBY_EVENT_RETURN:
5847 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5848 return;
5849 case RUBY_EVENT_C_RETURN:
5850 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5851 return;
5852 }
5853 }
5854}
5855
5856static VALUE
5857vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5858{
5859 if (!rb_const_defined_at(cbase, id)) {
5860 return 0;
5861 }
5862 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5863 return rb_public_const_get_at(cbase, id);
5864 }
5865 else {
5866 return rb_const_get_at(cbase, id);
5867 }
5868}
5869
5870static VALUE
5871vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5872{
5873 if (!RB_TYPE_P(klass, T_CLASS)) {
5874 return 0;
5875 }
5876 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5877 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5878
5879 if (tmp != super) {
5880 rb_raise(rb_eTypeError,
5881 "superclass mismatch for class %"PRIsVALUE"",
5882 rb_id2str(id));
5883 }
5884 else {
5885 return klass;
5886 }
5887 }
5888 else {
5889 return klass;
5890 }
5891}
5892
5893static VALUE
5894vm_check_if_module(ID id, VALUE mod)
5895{
5896 if (!RB_TYPE_P(mod, T_MODULE)) {
5897 return 0;
5898 }
5899 else {
5900 return mod;
5901 }
5902}
5903
5904static VALUE
5905declare_under(ID id, VALUE cbase, VALUE c)
5906{
5907 rb_set_class_path_string(c, cbase, rb_id2str(id));
5908 rb_const_set(cbase, id, c);
5909 return c;
5910}
5911
5912static VALUE
5913vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5914{
5915 /* new class declaration */
5916 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5917 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5918 rb_define_alloc_func(c, rb_get_alloc_func(c));
5919 rb_class_inherited(s, c);
5920 return c;
5921}
5922
5923static VALUE
5924vm_declare_module(ID id, VALUE cbase)
5925{
5926 /* new module declaration */
5927 return declare_under(id, cbase, rb_module_new());
5928}
5929
5930NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5931static void
5932unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5933{
5934 VALUE name = rb_id2str(id);
5935 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5936 name, type);
5937 VALUE location = rb_const_source_location_at(cbase, id);
5938 if (!NIL_P(location)) {
5939 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5940 " previous definition of %"PRIsVALUE" was here",
5941 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5942 }
5943 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5944}
5945
5946static VALUE
5947vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5948{
5949 VALUE klass;
5950
5951 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5952 rb_raise(rb_eTypeError,
5953 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5954 rb_obj_class(super));
5955 }
5956
5957 vm_check_if_namespace(cbase);
5958
5959 /* find klass */
5960 rb_autoload_load(cbase, id);
5961
5962 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5963 if (!vm_check_if_class(id, flags, super, klass))
5964 unmatched_redefinition("class", cbase, id, klass);
5965 return klass;
5966 }
5967 else {
5968 return vm_declare_class(id, flags, cbase, super);
5969 }
5970}
5971
5972static VALUE
5973vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5974{
5975 VALUE mod;
5976
5977 vm_check_if_namespace(cbase);
5978 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5979 if (!vm_check_if_module(id, mod))
5980 unmatched_redefinition("module", cbase, id, mod);
5981 return mod;
5982 }
5983 else {
5984 return vm_declare_module(id, cbase);
5985 }
5986}
5987
5988static VALUE
5989vm_find_or_create_class_by_id(ID id,
5990 rb_num_t flags,
5991 VALUE cbase,
5992 VALUE super)
5993{
5994 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5995
5996 switch (type) {
5997 case VM_DEFINECLASS_TYPE_CLASS:
5998 /* classdef returns class scope value */
5999 return vm_define_class(id, flags, cbase, super);
6000
6001 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
6002 /* classdef returns class scope value */
6003 return rb_singleton_class(cbase);
6004
6005 case VM_DEFINECLASS_TYPE_MODULE:
6006 /* classdef returns class scope value */
6007 return vm_define_module(id, flags, cbase);
6008
6009 default:
6010 rb_bug("unknown defineclass type: %d", (int)type);
6011 }
6012}
6013
6014static rb_method_visibility_t
6015vm_scope_visibility_get(const rb_execution_context_t *ec)
6016{
6017 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6018
6019 if (!vm_env_cref_by_cref(cfp->ep)) {
6020 return METHOD_VISI_PUBLIC;
6021 }
6022 else {
6023 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6024 }
6025}
6026
6027static int
6028vm_scope_module_func_check(const rb_execution_context_t *ec)
6029{
6030 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6031
6032 if (!vm_env_cref_by_cref(cfp->ep)) {
6033 return FALSE;
6034 }
6035 else {
6036 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6037 }
6038}
6039
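/* definemethod/definesmethod: note the `module_function` case below, e.g.
 *
 *     module M
 *       module_function
 *       def helper; end
 *     end
 *
 * defines both a private instance method and a public singleton method,
 * hence the second rb_add_method_iseq() call. */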
6040static void
6041vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
6042{
6043 VALUE klass;
6044 rb_method_visibility_t visi;
6045 rb_cref_t *cref = vm_ec_cref(ec);
6046
6047 if (is_singleton) {
6048 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
6049 visi = METHOD_VISI_PUBLIC;
6050 }
6051 else {
6052 klass = CREF_CLASS_FOR_DEFINITION(cref);
6053 visi = vm_scope_visibility_get(ec);
6054 }
6055
6056 if (NIL_P(klass)) {
6057 rb_raise(rb_eTypeError, "no class/module to add method");
6058 }
6059
6060 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
6061 // Set max_iv_count on the class based on the number of ivar sets in its initialize method
6062 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
6063 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
6064 }
6065
6066 if (!is_singleton && vm_scope_module_func_check(ec)) {
6067 klass = rb_singleton_class(klass);
6068 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
6069 }
6070}
6071
6072static VALUE
6073vm_invokeblock_i(struct rb_execution_context_struct *ec,
6074 struct rb_control_frame_struct *reg_cfp,
6075 struct rb_calling_info *calling)
6076{
6077 const struct rb_callinfo *ci = calling->cd->ci;
6078 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
6079
6080 if (block_handler == VM_BLOCK_HANDLER_NONE) {
6081 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
6082 }
6083 else {
6084 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
6085 }
6086}
6087
6088enum method_explorer_type {
6089 mexp_search_method,
6090 mexp_search_invokeblock,
6091 mexp_search_super,
6092};
6093
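/* vm_sendish() is the common trunk of send/invokesuper/invokeblock: it
 * fills a stack-allocated rb_calling_info and dispatches through the call
 * cache, so all three instruction families share the inline-cache fast
 * path selected by method_explorer. */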
6094static inline VALUE
6095vm_sendish(
6096 struct rb_execution_context_struct *ec,
6097 struct rb_control_frame_struct *reg_cfp,
6098 struct rb_call_data *cd,
6099 VALUE block_handler,
6100 enum method_explorer_type method_explorer
6101) {
6102 VALUE val = Qundef;
6103 const struct rb_callinfo *ci = cd->ci;
6104 const struct rb_callcache *cc;
6105 int argc = vm_ci_argc(ci);
6106 VALUE recv = TOPN(argc);
6107 struct rb_calling_info calling = {
6108 .block_handler = block_handler,
6109 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6110 .recv = recv,
6111 .argc = argc,
6112 .cd = cd,
6113 };
6114
6115 switch (method_explorer) {
6116 case mexp_search_method:
6117 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6118 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6119 break;
6120 case mexp_search_super:
6121 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6122 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6123 break;
6124 case mexp_search_invokeblock:
6125 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6126 break;
6127 }
6128 return val;
6129}
6130
6131VALUE
6132rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6133{
6134 stack_check(ec);
6135 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6136 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6137 VM_EXEC(ec, val);
6138 return val;
6139}
6140
6141VALUE
6142rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6143{
6144 stack_check(ec);
6145
6146 struct rb_forwarding_call_data adjusted_cd;
6147 struct rb_callinfo adjusted_ci;
6148
6149 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6150
6151 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6152
6153 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6154 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6155 }
6156
6157 VM_EXEC(ec, val);
6158 return val;
6159}
6160
6161VALUE
6162rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6163{
6164 stack_check(ec);
6165 VALUE bh = VM_BLOCK_HANDLER_NONE;
6166 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6167 VM_EXEC(ec, val);
6168 return val;
6169}
6170
6171VALUE
6172rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6173{
6174 stack_check(ec);
6175
6176 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6177 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6178
6179 VM_EXEC(ec, val);
6180 return val;
6181}
6182
6183VALUE
6184rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6185{
6186 stack_check(ec);
6187 struct rb_forwarding_call_data adjusted_cd;
6188 struct rb_callinfo adjusted_ci;
6189
6190 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6191
6192 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6193
6194 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6195 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6196 }
6197
6198 VM_EXEC(ec, val);
6199 return val;
6200}
6201
6202VALUE
6203rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6204{
6205 stack_check(ec);
6206 VALUE bh = VM_BLOCK_HANDLER_NONE;
6207 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6208 VM_EXEC(ec, val);
6209 return val;
6210}
6211
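/* objtostring: fast paths for string interpolation, e.g. `"#{obj}"`.
 * When the receiver's #to_s is known to be the default C implementation,
 * it is called directly (or a cheaper equivalent is substituted);
 * returning Qundef falls back to a normal #to_s dispatch. */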
6212/* object.c */
6213VALUE rb_nil_to_s(VALUE);
6214VALUE rb_true_to_s(VALUE);
6215VALUE rb_false_to_s(VALUE);
6216/* numeric.c */
6217VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6218VALUE rb_fix_to_s(VALUE);
6219/* variable.c */
6220VALUE rb_mod_to_s(VALUE);
6222
6223static VALUE
6224vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6225{
6226 int type = TYPE(recv);
6227 if (type == T_STRING) {
6228 return recv;
6229 }
6230
6231 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
6232
6233 switch (type) {
6234 case T_SYMBOL:
6235 if (check_method_basic_definition(cme)) {
6236 // rb_sym_to_s() allocates a mutable string, but since we are only
6237 // going to use this string for interpolation, it's fine to use the
6238 // frozen string.
6239 return rb_sym2str(recv);
6240 }
6241 break;
6242 case T_MODULE:
6243 case T_CLASS:
6244 if (check_cfunc(cme, rb_mod_to_s)) {
6245 // rb_mod_to_s() allocates a mutable string, but since we are only
6246 // going to use this string for interpolation, it's fine to use the
6247 // frozen string.
6248 VALUE val = rb_mod_name(recv);
6249 if (NIL_P(val)) {
6250 val = rb_mod_to_s(recv);
6251 }
6252 return val;
6253 }
6254 break;
6255 case T_NIL:
6256 if (check_cfunc(cme, rb_nil_to_s)) {
6257 return rb_nil_to_s(recv);
6258 }
6259 break;
6260 case T_TRUE:
6261 if (check_cfunc(cme, rb_true_to_s)) {
6262 return rb_true_to_s(recv);
6263 }
6264 break;
6265 case T_FALSE:
6266 if (check_cfunc(cme, rb_false_to_s)) {
6267 return rb_false_to_s(recv);
6268 }
6269 break;
6270 case T_FIXNUM:
6271 if (check_cfunc(cme, rb_int_to_s)) {
6272 return rb_fix_to_s(recv);
6273 }
6274 break;
6275 }
6276 return Qundef;
6277}
6278
6279// ZJIT-compiled code uses this C implementation and can only call a
6280// non-static function, hence this exported wrapper.
6281VALUE
6282rb_vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6283{
6284 return vm_objtostring(iseq, recv, cd);
6285}
6286
6287static VALUE
6288vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6289{
6290 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6291 return ary;
6292 }
6293 else {
6294 return Qundef;
6295 }
6296}
6297
6298static VALUE
6299vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6300{
6301 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6302 return hash;
6303 }
6304 else {
6305 return Qundef;
6306 }
6307}
6308
6309static VALUE
6310vm_opt_str_freeze(VALUE str, int bop, ID id)
6311{
6312 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6313 return str;
6314 }
6315 else {
6316 return Qundef;
6317 }
6318}
6319
6320/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6321#define id_cmp idCmp
6322
6323static VALUE
6324vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6325{
6326 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6327 return rb_ary_includes(ary, target);
6328 }
6329 else {
6330 VALUE args[1] = {target};
6331
6332 // duparray
6333 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6334 VALUE dupary = rb_ary_resurrect(ary);
6335
6336 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6337 }
6338}
6339
6340VALUE
6341rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6342{
6343 return vm_opt_duparray_include_p(ec, ary, target);
6344}
6345
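/* opt_newarray_send (max/min/hash/include?/pack): operate directly on the
 * operands still on the VM stack, so e.g. `[a, b, c].max` allocates no
 * temporary Array unless the corresponding Array method was redefined. */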
6346static VALUE
6347vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6348{
6349 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6350 if (num == 0) {
6351 return Qnil;
6352 }
6353 else {
6354 VALUE result = *ptr;
6355 rb_snum_t i = num - 1;
6356 while (i-- > 0) {
6357 const VALUE v = *++ptr;
6358 if (OPTIMIZED_CMP(v, result) > 0) {
6359 result = v;
6360 }
6361 }
6362 return result;
6363 }
6364 }
6365 else {
6366 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6367 }
6368}
6369
6370VALUE
6371rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6372{
6373 return vm_opt_newarray_max(ec, num, ptr);
6374}
6375
6376static VALUE
6377vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6378{
6379 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6380 if (num == 0) {
6381 return Qnil;
6382 }
6383 else {
6384 VALUE result = *ptr;
6385 rb_snum_t i = num - 1;
6386 while (i-- > 0) {
6387 const VALUE v = *++ptr;
6388 if (OPTIMIZED_CMP(v, result) < 0) {
6389 result = v;
6390 }
6391 }
6392 return result;
6393 }
6394 }
6395 else {
6396 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6397 }
6398}
6399
6400VALUE
6401rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6402{
6403 return vm_opt_newarray_min(ec, num, ptr);
6404}
6405
6406static VALUE
6407vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6408{
6409 // If Array#hash is _not_ monkeypatched, use the optimized call
6410 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6411 return rb_ary_hash_values(num, ptr);
6412 }
6413 else {
6414 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6415 }
6416}
6417
6418VALUE
6419rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6420{
6421 return vm_opt_newarray_hash(ec, num, ptr);
6422}
6423
6424VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6425VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6426
6427static VALUE
6428vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6429{
6430 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6431 struct RArray fake_ary = {RBASIC_INIT};
6432 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6433 return rb_ary_includes(ary, target);
6434 }
6435 else {
6436 VALUE args[1] = {target};
6437 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6438 }
6439}
6440
6441VALUE
6442rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6443{
6444 return vm_opt_newarray_include_p(ec, num, ptr, target);
6445}
6446
6447static VALUE
6448vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6449{
6450 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6451 struct RArray fake_ary = {RBASIC_INIT};
6452 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6453 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6454 }
6455 else {
6456 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6457 // Set up an argv array with room for the keyword hash.
6458 VALUE args[2];
6459 args[0] = fmt;
6460 int kw_splat = RB_NO_KEYWORDS;
6461 int argc = 1;
6462
6463 if (!UNDEF_P(buffer)) {
6464 args[1] = rb_hash_new_with_size(1);
6465 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6466 kw_splat = RB_PASS_KEYWORDS;
6467 argc++;
6468 }
6469
6470 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6471 }
6472}
6473
6474VALUE
6475rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6476{
6477 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6478}
6479
6480VALUE
6481rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6482{
6483 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6484}
6485
6486#undef id_cmp
6487
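/* Constant inline caches are invalidated per name: vm->constant_cache maps
 * each ID appearing in a cached path (e.g. :Foo and :Bar for `Foo::Bar`)
 * to the set of ICs depending on it, so redefining Foo only flushes the
 * caches that mention Foo. */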
6488static void
6489vm_track_constant_cache(ID id, void *ic)
6490{
6491 rb_vm_t *vm = GET_VM();
6492 struct rb_id_table *const_cache = vm->constant_cache;
6493 VALUE lookup_result;
6494 set_table *ics;
6495
6496 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6497 ics = (set_table *)lookup_result;
6498 }
6499 else {
6500 ics = set_init_numtable();
6501 rb_id_table_insert(const_cache, id, (VALUE)ics);
6502 }
6503
6504 /* The call below to set_insert could allocate, which could trigger a GC.
6505 * If it triggers a GC, it may free an iseq that also holds a cache to this
6506 * constant. If that iseq is the last iseq with a cache to this constant, then
6507 * it will free this set table, which would cause a use-after-free during this
6508 * set_insert.
6509 *
6510 * So to fix this issue, we store the ID that is currently being inserted
6511 * and, in remove_from_constant_cache, we don't free the ST table for ID
6512 * equal to this one.
6513 *
6514 * See [Bug #20921].
6515 */
6516 vm->inserting_constant_cache_id = id;
6517
6518 set_insert(ics, (st_data_t)ic);
6519
6520 vm->inserting_constant_cache_id = (ID)0;
6521}
6522
6523static void
6524vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6525{
6526 RB_VM_LOCKING() {
6527 for (int i = 0; segments[i]; i++) {
6528 ID id = segments[i];
6529 if (id == idNULL) continue;
6530 vm_track_constant_cache(id, ic);
6531 }
6532 }
6533}
6534
6535// For JIT inlining
6536static inline bool
6537vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6538{
6539 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6540 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6541
6542 return (ic_cref == NULL || // no need to check CREF
6543 ic_cref == vm_get_cref(reg_ep));
6544 }
6545 return false;
6546}
6547
6548static bool
6549vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6550{
6551 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6552 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6553}
6554
6555// YJIT needs this function to never allocate and never raise
6556bool
6557rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6558{
6559 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6560}
6561
6562static void
6563vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6564{
6565 if (ruby_vm_const_missing_count > 0) {
6566 ruby_vm_const_missing_count = 0;
6567 ic->entry = NULL;
6568 return;
6569 }
6570
6571 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6572 RB_OBJ_WRITE(ice, &ice->value, val);
6573 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6574 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6575 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6576
6577 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6578 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6579 rb_yjit_constant_ic_update(iseq, ic, pos);
6580}
6581
6582VALUE
6583rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6584{
6585 VALUE val;
6586 const ID *segments = ic->segments;
6587 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6588 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6589 val = ice->value;
6590
6591 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6592 }
6593 else {
6594 ruby_vm_constant_cache_misses++;
6595 val = vm_get_ev_const_chain(ec, segments);
6596 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6597 // Undo the PC increment to get the address of this instruction
6598 // INSN_ATTR(width) == 2
6599 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6600 }
6601 return val;
6602}
6603
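/* Backend of the `once` instruction (used e.g. for `/#{pat}/o` regexps):
 * the body runs at most once per iseq. A stored thread pointer marks "in
 * progress", so other threads spin (with interrupt checks) until the value
 * is published; RUNNING_THREAD_ONCE_DONE marks completion. */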
6604static VALUE
6605vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6606{
6607 rb_thread_t *th = rb_ec_thread_ptr(ec);
6608 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6609
6610 again:
6611 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6612 return is->once.value;
6613 }
6614 else if (is->once.running_thread == NULL) {
6615 VALUE val;
6616 is->once.running_thread = th;
6617 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6618 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6619 /* is->once.running_thread is cleared by vm_once_clear() */
6620 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6621 return val;
6622 }
6623 else if (is->once.running_thread == th) {
6624 /* recursive once */
6625 return vm_once_exec((VALUE)iseq);
6626 }
6627 else {
6628 /* waiting for finish */
6629 RUBY_VM_CHECK_INTS(ec);
6630 rb_thread_schedule();
6631 goto again;
6632 }
6633}
6634
6635static OFFSET
6636vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6637{
6638 switch (OBJ_BUILTIN_TYPE(key)) {
6639 case -1:
6640 case T_FLOAT:
6641 case T_SYMBOL:
6642 case T_BIGNUM:
6643 case T_STRING:
6644 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6645 SYMBOL_REDEFINED_OP_FLAG |
6646 INTEGER_REDEFINED_OP_FLAG |
6647 FLOAT_REDEFINED_OP_FLAG |
6648 NIL_REDEFINED_OP_FLAG |
6649 TRUE_REDEFINED_OP_FLAG |
6650 FALSE_REDEFINED_OP_FLAG |
6651 STRING_REDEFINED_OP_FLAG)) {
6652 st_data_t val;
6653 if (RB_FLOAT_TYPE_P(key)) {
6654 double kval = RFLOAT_VALUE(key);
6655 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6656 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6657 }
6658 }
6659 if (rb_hash_stlike_lookup(hash, key, &val)) {
6660 return FIX2LONG((VALUE)val);
6661 }
6662 else {
6663 return else_offset;
6664 }
6665 }
6666 }
6667 return 0;
6668}
6669
6670NORETURN(static void
6671 vm_stack_consistency_error(const rb_execution_context_t *ec,
6672 const rb_control_frame_t *,
6673 const VALUE *));
6674static void
6675vm_stack_consistency_error(const rb_execution_context_t *ec,
6676 const rb_control_frame_t *cfp,
6677 const VALUE *bp)
6678{
6679 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6680 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6681 static const char stack_consistency_error[] =
6682 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6683#if defined RUBY_DEVEL
6684 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6685 rb_str_cat_cstr(mesg, "\n");
6686 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6687 rb_bug("%s", RSTRING_PTR(mesg));
6688#else
6689 rb_bug(stack_consistency_error, nsp, nbp);
6690#endif
6691}
6692
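/* vm_opt_plus() through vm_opt_regexpmatch2() below share one contract:
 * return the result when the operands hit a known fast path (Fixnum,
 * Float, String, ...) and the corresponding basic operation is still
 * unredefined; return Qundef to fall back to ordinary method dispatch. */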
6693static VALUE
6694vm_opt_plus(VALUE recv, VALUE obj)
6695{
6696 if (FIXNUM_2_P(recv, obj) &&
6697 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6698 return rb_fix_plus_fix(recv, obj);
6699 }
6700 else if (FLONUM_2_P(recv, obj) &&
6701 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6702 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6703 }
6704 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6705 return Qundef;
6706 }
6707 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6708 RBASIC_CLASS(obj) == rb_cFloat &&
6709 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6710 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6711 }
6712 else if (RBASIC_CLASS(recv) == rb_cString &&
6713 RBASIC_CLASS(obj) == rb_cString &&
6714 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6715 return rb_str_opt_plus(recv, obj);
6716 }
6717 else if (RBASIC_CLASS(recv) == rb_cArray &&
6718 RBASIC_CLASS(obj) == rb_cArray &&
6719 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6720 return rb_ary_plus(recv, obj);
6721 }
6722 else {
6723 return Qundef;
6724 }
6725}
6726
6727static VALUE
6728vm_opt_minus(VALUE recv, VALUE obj)
6729{
6730 if (FIXNUM_2_P(recv, obj) &&
6731 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6732 return rb_fix_minus_fix(recv, obj);
6733 }
6734 else if (FLONUM_2_P(recv, obj) &&
6735 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6736 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6737 }
6738 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6739 return Qundef;
6740 }
6741 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6742 RBASIC_CLASS(obj) == rb_cFloat &&
6743 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6744 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6745 }
6746 else {
6747 return Qundef;
6748 }
6749}
6750
6751static VALUE
6752vm_opt_mult(VALUE recv, VALUE obj)
6753{
6754 if (FIXNUM_2_P(recv, obj) &&
6755 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6756 return rb_fix_mul_fix(recv, obj);
6757 }
6758 else if (FLONUM_2_P(recv, obj) &&
6759 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6760 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6761 }
6762 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6763 return Qundef;
6764 }
6765 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6766 RBASIC_CLASS(obj) == rb_cFloat &&
6767 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6768 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6769 }
6770 else {
6771 return Qundef;
6772 }
6773}
6774
6775static VALUE
6776vm_opt_div(VALUE recv, VALUE obj)
6777{
6778 if (FIXNUM_2_P(recv, obj) &&
6779 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6780 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6781 }
6782 else if (FLONUM_2_P(recv, obj) &&
6783 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6784 return rb_flo_div_flo(recv, obj);
6785 }
6786 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6787 return Qundef;
6788 }
6789 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6790 RBASIC_CLASS(obj) == rb_cFloat &&
6791 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6792 return rb_flo_div_flo(recv, obj);
6793 }
6794 else {
6795 return Qundef;
6796 }
6797}
6798
6799static VALUE
6800vm_opt_mod(VALUE recv, VALUE obj)
6801{
6802 if (FIXNUM_2_P(recv, obj) &&
6803 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6804 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6805 }
6806 else if (FLONUM_2_P(recv, obj) &&
6807 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6808 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6809 }
6810 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6811 return Qundef;
6812 }
6813 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6814 RBASIC_CLASS(obj) == rb_cFloat &&
6815 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6816 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6817 }
6818 else {
6819 return Qundef;
6820 }
6821}
6822
6823static VALUE
6824vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6825{
6826 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6827 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6828
6829 if (!UNDEF_P(val)) {
6830 return RBOOL(!RTEST(val));
6831 }
6832 }
6833
6834 return Qundef;
6835}
6836
6837static VALUE
6838vm_opt_lt(VALUE recv, VALUE obj)
6839{
6840 if (FIXNUM_2_P(recv, obj) &&
6841 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6842 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6843 }
6844 else if (FLONUM_2_P(recv, obj) &&
6845 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6846 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6847 }
6848 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6849 return Qundef;
6850 }
6851 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6852 RBASIC_CLASS(obj) == rb_cFloat &&
6853 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6854 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6855 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6856 }
6857 else {
6858 return Qundef;
6859 }
6860}
6861
6862static VALUE
6863vm_opt_le(VALUE recv, VALUE obj)
6864{
6865 if (FIXNUM_2_P(recv, obj) &&
6866 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6867 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6868 }
6869 else if (FLONUM_2_P(recv, obj) &&
6870 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6871 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6872 }
6873 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6874 return Qundef;
6875 }
6876 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6877 RBASIC_CLASS(obj) == rb_cFloat &&
6878 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6879 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6880 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6881 }
6882 else {
6883 return Qundef;
6884 }
6885}
6886
6887static VALUE
6888vm_opt_gt(VALUE recv, VALUE obj)
6889{
6890 if (FIXNUM_2_P(recv, obj) &&
6891 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6892 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6893 }
6894 else if (FLONUM_2_P(recv, obj) &&
6895 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6896 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6897 }
6898 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6899 return Qundef;
6900 }
6901 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6902 RBASIC_CLASS(obj) == rb_cFloat &&
6903 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6904 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6905 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6906 }
6907 else {
6908 return Qundef;
6909 }
6910}
6911
6912static VALUE
6913vm_opt_ge(VALUE recv, VALUE obj)
6914{
6915 if (FIXNUM_2_P(recv, obj) &&
6916 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6917 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6918 }
6919 else if (FLONUM_2_P(recv, obj) &&
6920 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6921 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6922 }
6923 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6924 return Qundef;
6925 }
6926 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6927 RBASIC_CLASS(obj) == rb_cFloat &&
6928 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6929 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6930 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6931 }
6932 else {
6933 return Qundef;
6934 }
6935}
6936
6937
6938static VALUE
6939vm_opt_ltlt(VALUE recv, VALUE obj)
6940{
6941 if (SPECIAL_CONST_P(recv)) {
6942 return Qundef;
6943 }
6944 else if (RBASIC_CLASS(recv) == rb_cString &&
6945 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6946 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6947 return rb_str_buf_append(recv, obj);
6948 }
6949 else {
6950 return rb_str_concat(recv, obj);
6951 }
6952 }
6953 else if (RBASIC_CLASS(recv) == rb_cArray &&
6954 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6955 return rb_ary_push(recv, obj);
6956 }
6957 else {
6958 return Qundef;
6959 }
6960}
6961
6962static VALUE
6963vm_opt_and(VALUE recv, VALUE obj)
6964{
6965 // If recv and obj are both fixnums, then the bottom tag bit
6966 // will be 1 on both. 1 & 1 == 1, so the result value will also
6967 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6968 // will be 0, and we return Qundef.
6969 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6970
6971 if (FIXNUM_P(ret) &&
6972 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6973 return ret;
6974 }
6975 else {
6976 return Qundef;
6977 }
6978}
6979
6980static VALUE
6981vm_opt_or(VALUE recv, VALUE obj)
6982{
6983 if (FIXNUM_2_P(recv, obj) &&
6984 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6985 return recv | obj;
6986 }
6987 else {
6988 return Qundef;
6989 }
6990}
6991
6992static VALUE
6993vm_opt_aref(VALUE recv, VALUE obj)
6994{
6995 if (SPECIAL_CONST_P(recv)) {
6996 if (FIXNUM_2_P(recv, obj) &&
6997 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6998 return rb_fix_aref(recv, obj);
6999 }
7000 return Qundef;
7001 }
7002 else if (RBASIC_CLASS(recv) == rb_cArray &&
7003 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
7004 if (FIXNUM_P(obj)) {
7005 return rb_ary_entry_internal(recv, FIX2LONG(obj));
7006 }
7007 else {
7008 return rb_ary_aref1(recv, obj);
7009 }
7010 }
7011 else if (RBASIC_CLASS(recv) == rb_cHash &&
7012 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
7013 return rb_hash_aref(recv, obj);
7014 }
7015 else {
7016 return Qundef;
7017 }
7018}
7019
7020static VALUE
7021vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
7022{
7023 if (SPECIAL_CONST_P(recv)) {
7024 return Qundef;
7025 }
7026 else if (RBASIC_CLASS(recv) == rb_cArray &&
7027 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
7028 FIXNUM_P(obj)) {
7029 rb_ary_store(recv, FIX2LONG(obj), set);
7030 return set;
7031 }
7032 else if (RBASIC_CLASS(recv) == rb_cHash &&
7033 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
7034 rb_hash_aset(recv, obj, set);
7035 return set;
7036 }
7037 else {
7038 return Qundef;
7039 }
7040}
7041
7042static VALUE
7043vm_opt_length(VALUE recv, int bop)
7044{
7045 if (SPECIAL_CONST_P(recv)) {
7046 return Qundef;
7047 }
7048 else if (RBASIC_CLASS(recv) == rb_cString &&
7049 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7050 if (bop == BOP_EMPTY_P) {
7051 return LONG2NUM(RSTRING_LEN(recv));
7052 }
7053 else {
7054 return rb_str_length(recv);
7055 }
7056 }
7057 else if (RBASIC_CLASS(recv) == rb_cArray &&
7058 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7059 return LONG2NUM(RARRAY_LEN(recv));
7060 }
7061 else if (RBASIC_CLASS(recv) == rb_cHash &&
7062 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7063 return INT2FIX(RHASH_SIZE(recv));
7064 }
7065 else {
7066 return Qundef;
7067 }
7068}
7069
7070static VALUE
7071vm_opt_empty_p(VALUE recv)
7072{
7073 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7074 case Qundef: return Qundef;
7075 case INT2FIX(0): return Qtrue;
7076 default: return Qfalse;
7077 }
7078}
7079
7080VALUE rb_false(VALUE obj);
7081
7082static VALUE
7083vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7084{
7085 if (NIL_P(recv) &&
7086 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7087 return Qtrue;
7088 }
7089 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
7090 return Qfalse;
7091 }
7092 else {
7093 return Qundef;
7094 }
7095}
7096
7097static VALUE
7098fix_succ(VALUE x)
7099{
7100 switch (x) {
7101 case ~0UL:
7102 /* 0xFFFF_FFFF == INT2FIX(-1)
7103 * `-1.succ` is of course 0. */
7104 return INT2FIX(0);
7105 case RSHIFT(~0UL, 1):
7106 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7107 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7108 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7109 default:
7110 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7111 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7112 * == lx*2 + ly*2 + 1
7113 * == (lx*2+1) + (ly*2+1) - 1
7114 * == x + y - 1
7115 *
7116 * Here, if we put y := INT2FIX(1):
7117 *
7118 * == x + INT2FIX(1) - 1
7119 * == x + 2 .
7120 */
7121 return x + 2;
7122 }
7123}
7124
7125static VALUE
7126vm_opt_succ(VALUE recv)
7127{
7128 if (FIXNUM_P(recv) &&
7129 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7130 return fix_succ(recv);
7131 }
7132 else if (SPECIAL_CONST_P(recv)) {
7133 return Qundef;
7134 }
7135 else if (RBASIC_CLASS(recv) == rb_cString &&
7136 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7137 return rb_str_succ(recv);
7138 }
7139 else {
7140 return Qundef;
7141 }
7142}
7143
7144static VALUE
7145vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7146{
7147 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7148 return RBOOL(!RTEST(recv));
7149 }
7150 else {
7151 return Qundef;
7152 }
7153}
7154
7155static VALUE
7156vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7157{
7158 if (SPECIAL_CONST_P(recv)) {
7159 return Qundef;
7160 }
7161 else if (RBASIC_CLASS(recv) == rb_cString &&
7162 CLASS_OF(obj) == rb_cRegexp &&
7163 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7164 return rb_reg_match(obj, recv);
7165 }
7166 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7167 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7168 return rb_reg_match(recv, obj);
7169 }
7170 else {
7171 return Qundef;
7172 }
7173}
7174
7175rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7176
7177NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7178
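/* vm_trace() is invoked from the trace_* instruction variants. It fires
 * global (ractor-wide) and iseq-local hooks for the events recorded at the
 * current PC, re-loading hook lists where a hook itself may have added or
 * freed them. */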
7179static inline void
7180vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7181 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7182 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7183{
7184 rb_event_flag_t event = pc_events & target_event;
7185 VALUE self = GET_SELF();
7186
7187 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7188
7189 if (event & global_hooks->events) {
7190 /* increment PC because source line is calculated with PC-1 */
7191 reg_cfp->pc++;
7192 vm_dtrace(event, ec);
7193 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7194 reg_cfp->pc--;
7195 }
7196
7197 // Load here since global hook above can add and free local hooks
7198 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7199 if (local_hooks != NULL) {
7200 if (event & local_hooks->events) {
7201 /* increment PC because source line is calculated with PC-1 */
7202 reg_cfp->pc++;
7203 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7204 reg_cfp->pc--;
7205 }
7206 }
7207}
7208
7209#define VM_TRACE_HOOK(target_event, val) do { \
7210 if ((pc_events & (target_event)) & enabled_flags) { \
7211 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7212 } \
7213} while (0)
7214
7215static VALUE
7216rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7217{
7218 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7219 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7220 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7221}
7222
7223static void
7224vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7225{
7226 const VALUE *pc = reg_cfp->pc;
7227 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7228 rb_event_flag_t global_events = enabled_flags;
7229
7230 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7231 return;
7232 }
7233 else {
7234 const rb_iseq_t *iseq = reg_cfp->iseq;
7235 VALUE iseq_val = (VALUE)iseq;
7236 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7237 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7238 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7239 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7240 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7241 rb_hook_list_t *bmethod_local_hooks = NULL;
7242 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7243 rb_event_flag_t bmethod_local_events = 0;
7244 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7245 enabled_flags |= iseq_local_events;
7246
7247 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7248
7249 if (bmethod_frame) {
7250 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7251 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7252 bmethod_local_hooks = me->def->body.bmethod.hooks;
7253 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7254 if (bmethod_local_hooks) {
7255 bmethod_local_events = bmethod_local_hooks->events;
7256 }
7257 }
7258
7259
7260 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7261#if 0
7262 /* disable trace */
7263 /* TODO: incomplete */
7264 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7265#else
7266 /* do not disable trace because of performance problem
7267 * (re-enable overhead)
7268 */
7269#endif
7270 return;
7271 }
7272 else if (ec->trace_arg != NULL) {
7273 /* already tracing */
7274 return;
7275 }
7276 else {
7277 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7278 /* Note, not considering iseq local events here since the same
7279 * iseq could be used in multiple bmethods. */
7280 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7281
7282 if (0) {
7283 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7284 (int)pos,
7285 (int)pc_events,
7286 RSTRING_PTR(rb_iseq_path(iseq)),
7287 (int)rb_iseq_line_no(iseq, pos),
7288 RSTRING_PTR(rb_iseq_label(iseq)));
7289 }
7290 VM_ASSERT(reg_cfp->pc == pc);
7291 VM_ASSERT(pc_events != 0);
7292
7293 /* check traces */
7294 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7295 /* b_call instruction running as a method. Fire call event. */
7296 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7297 }
7298 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7299 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7300 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7301 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7302 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7303 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7304 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7305 /* b_return instruction running as a method. Fire return event. */
7306 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7307 }
7308
7309 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7310 // We need the pointer to stay valid in case compaction happens in a trace hook.
7311 //
7312 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7313 // storage for `rb_method_definition_t` is not on the GC heap.
7314 RB_GC_GUARD(iseq_val);
7315 }
7316 }
7317}
7318#undef VM_TRACE_HOOK
7319
7320#if VM_CHECK_MODE > 0
7321NORETURN( NOINLINE( COLDFUNC
7322void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7323
7324void
7325Init_vm_stack_canary(void)
7326{
7327 /* This has to be called _after_ our PRNG is properly set up. */
7328 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7329 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7330
7331 vm_stack_canary_was_born = true;
7332 VM_ASSERT(n == 0);
7333}
7334
7335void
7336rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7337{
7338 /* Because a method has already been called, why not call
7339 * another one. */
7340 const char *insn = rb_insns_name(i);
7341 VALUE inspection = rb_inspect(c);
7342 const char *str = StringValueCStr(inspection);
7343
7344 rb_bug("dead canary found at %s: %s", insn, str);
7345}
7346
7347#else
7348void Init_vm_stack_canary(void) { /* nothing to do */ }
7349#endif
7350
7351
7352/* a part of the following code is generated by this ruby script:
7353
735416.times{|i|
7355 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7356 typedef_args.prepend(", ") if i != 0
7357 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7358 call_args.prepend(", ") if i != 0
7359 puts %Q{
7360static VALUE
7361builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7362{
7363 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7364 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7365}}
7366}
7367
7368puts
7369puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
737016.times{|i|
7371 puts " builtin_invoker#{i},"
7372}
7373puts "};"
7374*/
7375
static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}

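// Editorial sketch (not part of the original source): the unchecked table
// index above is safe only if builtins are never declared with more than 15
// positional arguments; that bound is inferred here from the table size, not
// stated elsewhere in this file. A hypothetical direct call through the table
// would look like:
//
//     VALUE args[2] = { v1, v2 };
//     VALUE ret = lookup_builtin_invoker(2)(ec, cfp->self, args,
//                                           (rb_insn_func_t)(uintptr_t)bf->func_ptr);
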
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

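// Editorial note (not part of the original source): conceptually, the canary
// pair above is a sentinel check on the leaf assumption. Assuming SETUP_CANARY
// plants `vm_stack_canary` just past the live VM stack top and CHECK_CANARY
// reports through rb_vm_canary_is_found_dead() when it was overwritten, the
// pattern is (with a hypothetical `call_leaf_builtin`):
//
//     *stack_top = vm_stack_canary;            // plant sentinel
//     VALUE ret = call_leaf_builtin(ec);       // must not push onto the VM stack
//     if (*stack_top != vm_stack_canary)       // leaf assumption violated
//         rb_vm_canary_is_found_dead(BIN(invokebuiltin), *stack_top);
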
static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

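// Editorial sketch (not part of the original source): the argv computation
// above relies on locals being stored contiguously just below the frame's
// environment data, addressed relative to ep. Assuming VM_ENV_DATA_SIZE == 3
// (the me/cref, specval, and flags slots) and local_table_size == 3, the
// layout is:
//
//     ep[-5]  ep[-4]  ep[-3]  ep[-2]  ep[-1]  ep[0]
//     local0  local1  local2  <---- env data ---->
//
// so ep - 3 - 3 + 1 == &local0, and adding start_index skips leading locals
// that should not be forwarded to the builtin.
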
// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}

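// Editorial note (not part of the original source): rb_vm_lvar_exposed() lets
// C code inlined via `__builtin_inline!` read the calling frame's locals with
// the same ep-relative indexing sketched above. A hypothetical inlined
// expression reading the first of three locals would effectively become:
//
//     VALUE first_local = rb_vm_lvar_exposed(ec, -5);  // assuming the layout sketched earlier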