Ruby 3.5.0dev (2025-09-15 revision e19bb99347cc45b27831b73736b0c3baf250e5e6)
vm_insnhelper.c (e19bb99347cc45b27831b73736b0c3baf250e5e6)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#include <math.h>

#ifdef HAVE_STDATOMIC_H
  #include <stdatomic.h>
#endif

#include "constant.h"
#include "debug_counter.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/set_table.h"
#include "internal/struct.h"
#include "variable.h"

/* finish iseq array */
#include "insns.inc"
#include "insns_info.inc"

extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);

static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}
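
/* Editorial note: the special exceptions (e.g. the pre-allocated
 * SystemStackError) are shared singletons, so decorating one with a
 * backtrace in place would mutate global state. This helper therefore
 * allocates a fresh instance of the same class and copies the ivars over;
 * callers such as ec_stack_overflow then attach idBt/idBt_locations to the
 * copy only. */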

NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}

void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
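
/* Editorial summary of the criticality ladder above:
 *   crit >= rb_stack_overflow_fatal   -> raise the pre-allocated fatal
 *                                        error: no copy, no backtrace;
 *   crit >= rb_stack_overflow_signal  -> raise SystemStackError without
 *                                        building a backtrace (setup == 0),
 *                                        e.g. when on a signal stack;
 *   otherwise                         -> full setup including backtrace.
 */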

static inline void stack_check(rb_execution_context_t *ec);

#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}

static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
                  );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
      /*                           BLK    ME     CREF   CFRAME */
      CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}

static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;

// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}

void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent an infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting such large contents.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out for the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */

#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif

// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
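
/* Editorial sketch of how the canary is used (simplified; the real checks
 * live in the generated insn entries): a "leaf" instruction promises not to
 * push to the VM stack nor call back into Ruby, so the VM can place the
 * canary just above sp before running it and assert it survived:
 *
 *     sp[0] = rb_vm_stack_canary();
 *     // ... execute the allegedly-leaf instruction ...
 *     rb_vm_check_canary(ec, sp); // rb_bug()s if sp[0] was clobbered
 */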

STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);

static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i = 0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval;    /* ep[-1] / block handler or prev env ptr */
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        .iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
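
/* Editorial diagram of the frame layout vm_push_frame builds (stack grows
 * upward):
 *
 *     locals[0] .. locals[local_size-1]  -- initialized to Qnil
 *     cref_or_me                         -- ep[-2]
 *     specval                            -- ep[-1] (block handler / prev ep)
 *     type (ENV_FLAGS)                   -- ep[ 0]
 *     <- cfp->sp
 *
 * cfp->ep points at the flags slot, which is why the VM_ENV_DATA_INDEX_*
 * constants asserted above are -2, -1 and -0. */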

void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

// Push a dummy (pseudo) frame whose iseq has the given file name (fname).
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}

/* method dispatch */
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
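
/* Editorial examples of the messages built above:
 *   rb_arity_error_new(3, 2, 2)                   -> "wrong number of arguments (given 3, expected 2)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS) -> "wrong number of arguments (given 0, expected 1+)"
 *   rb_arity_error_new(4, 1, 3)                   -> "wrong number of arguments (given 4, expected 1..3)"
 */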

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */

NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember the env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}

void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
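
/* Editorial note: while an env still lives on the VM stack it is not a
 * GC-managed object and needs no write barrier (the fast path above). Once
 * the env has escaped to the heap, VM_ENV_FLAG_WB_REQUIRED is set; the slow
 * path then registers the env with the GC's remembered set before writing
 * and clears the flag, after which plain writes are safe again. */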

VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}

/* svar */

#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif

static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
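
/* Editorial note: svars back the frame-local "special" variables. In Ruby
 * terms (illustrative):
 *
 *     "abc" =~ /b/  # stores the MatchData in VM_SVAR_BACKREF -> $~, $1, $&
 *     gets          # stores the read line in VM_SVAR_LASTLINE -> $_
 *
 * The svar shares the ep[-2] slot with the cref/method entry: svar_new
 * stashes the previous slot value in svar->cref_or_me so lookups can still
 * reach it. */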

static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
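
/* Editorial note on the `type` encoding used above and in
 * vm_backref_defined below: type == 0 reads the svar named by `key`;
 * otherwise the low bit picks the flavor and the rest is payload:
 *     (c << 1) | 1  -- named back-ref, c in {'&', '`', '\'', '+'}
 *                      (i.e. $&, $`, $', $+)
 *     (n << 1)      -- numbered group $n via rb_reg_nth_match
 */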

static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}

PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}

static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}

static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}

static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}

static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}

static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    #define ADD_NEW_CREF(new_cref) \
        if (new_cref_tail) { \
            RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
        } \
        else { \
            new_cref_head = new_cref; \
        } \
        new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}

static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}

static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
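
/* Editorial Ruby-level illustration of the lexical search above:
 *
 *     module M
 *       X = 1
 *       class C
 *         def f; X; end  # X is found by walking the cref chain (C, then M)
 *       end              # before falling back to rb_const_get on the
 *     end                # cbase (which searches ancestors).
 *
 * With an explicit scope such as M::X (orig_klass given), only the
 * rb_public_const_* branch at the bottom runs. */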

VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}

static inline VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
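
/* Editorial example: for `A::B::C` the compiler emits a zero-terminated
 * segment list, roughly {idA, idB, idC, 0}; a leading `::` is encoded as a
 * first idNULL segment, which pins the initial lookup to Object. Either
 * way, only the first segment may use the lexical (allow_nil) search. */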

static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
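
/* Editorial note: the skip loop above encodes the rule that class variables
 * ignore singleton classes and eval-pushed scopes, so e.g. inside
 * `class << obj ... end` a @@var resolves against the enclosing lexical
 * class/module; plain toplevel access (only the bottom cref left) raises a
 * RuntimeError when top_level_raise is set. */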

ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        fields_obj = rb_obj_fields(obj, id);
    }

    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
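
/* Editorial summary of the shape-based inline cache above: every object
 * carries a shape_id describing its field layout, and the IC/CC remembers
 * the (shape_id, index) pair it saw last. On a hit the read is just
 *
 *     val = ivar_list[index];   // no hash lookup
 *
 * and on a miss the index is recomputed from the shape tree (or from an
 * st_table for "too complex" objects) and the cache is refilled. */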

static void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    rb_check_frozen(obj);

    attr_index_t index = rb_ivar_set_index(obj, id, val);
    shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

    if (!rb_shape_too_complex_p(next_shape_id)) {
        populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
    return val;
#else
    return rb_ivar_set(obj, id, val);
#endif
}

static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    if (UNLIKELY(!rb_ractor_main_p())) {
        return Qundef;
    }

    VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
    if (UNLIKELY(!fields_obj)) {
        return Qundef;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
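
/* Editorial note: the set-side cache distinguishes two hit flavors:
 *   - same shape (shape_id == dest_shape_id): the ivar already exists and
 *     the slot is simply overwritten;
 *   - cached transition (dest_shape_id is a direct child adding exactly
 *     `id` with unchanged capacity): write the slot, then move the object
 *     to dest_shape_id.
 * Everything else returns Qundef so the caller falls back to
 * vm_setivar_slowpath (which also performs the frozen check). */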

static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t *cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}
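
/* Editorial note: a class-variable inline cache entry stays valid only
 * while (a) the global cvar state counter is unchanged (it is bumped by
 * cvar definitions/removals) and (b) the lexical cref still matches; both
 * conditions are re-checked in vm_getclassvariable/vm_setclassvariable
 * below before ic->entry->class_value is trusted. */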

static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}

static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}

static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}

static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
            break;
          case T_CLASS:
          case T_MODULE:
            if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
                return;
            }
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}
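
/* Editorial note on the encodings in vm_throw_continue: a Fixnum err marks
 * a fatal error in flight, a Symbol is a Kernel#throw tag (TAG_THROW), a
 * THROW_DATA imemo carries its own saved state (break/return/retry), and
 * anything else is an exception being re-raised (TAG_RAISE). */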

static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i = 0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}

static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
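
/* Editorial Ruby-level examples of vm_expandarray (illustrative):
 *
 *     a, b  = [1, 2, 3]  # num=2, no splat: pushes 2, then 1 (a on top)
 *     a, *b = [1, 2, 3]  # splat flag 0x01: pushes [2, 3], then 1
 *     *a, b = [1, 2, 3]  # post flag 0x02: pushes 3, then [1, 2]
 *
 * Missing elements are padded with nil when the array is shorter than num.
 */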

static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);

static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);

static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    int initial_capa = 2;
    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
#if VM_CHECK_MODE > 0
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = initial_capa;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);

    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
    return ccs;
}

static void
vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        RUBY_ASSERT(ccs->capa > 0);
        ccs->capa *= 2;
        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
#if VM_CHECK_MODE > 0
        ccs->debug_sig = ~(VALUE)ccs;
#endif
        // GC?
        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}
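
/* Editorial note: a ccs ("class cc entries") groups all call caches for one
 * (class, mid) pair. Entries are distinguished by the call site's
 * (argc, flag) taken from the callinfo, so e.g. `foo(1, 2)` and `foo(*a)`
 * on the same method occupy different slots. The array doubles like a plain
 * vector on growth, and debug_sig == ~ccs is a cheap integrity check under
 * VM_CHECK_MODE. */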
2043
2044#if VM_CHECK_MODE > 0
2045void
2046rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2047{
2048 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2049 for (int i=0; i<ccs->len; i++) {
2050 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2051 ccs->entries[i].flag,
2052 ccs->entries[i].argc);
2053 rp(ccs->entries[i].cc);
2054 }
2055}
2056
2057static int
2058vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2059{
2060 VM_ASSERT(vm_ccs_p(ccs));
2061 VM_ASSERT(ccs->len <= ccs->capa);
2062
2063 for (int i=0; i<ccs->len; i++) {
2064 const struct rb_callcache *cc = ccs->entries[i].cc;
2065
2066 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2067 VM_ASSERT(vm_cc_class_check(cc, klass));
2068 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2069 VM_ASSERT(!vm_cc_super_p(cc));
2070 VM_ASSERT(!vm_cc_refinement_p(cc));
2071 }
2072 return TRUE;
2073}
2074#endif
2075
2076const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2077
2078static void
2079vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2080{
2081 ASSERT_vm_locking();
2082
2083 if (rb_multi_ractor_p()) {
2084 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2085 // Another ractor updated the CC table while we were waiting on the VM lock.
2086 // We have to retry.
2087 return;
2088 }
2089
2090 struct rb_class_cc_entries *ccs = NULL;
2091 rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs);
2092
2093 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2094 // Another ractor replaced that entry while we were waiting on the VM lock.
2095 return;
2096 }
2097
2098 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2099 rb_vm_cc_table_delete(new_table, mid);
2100 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2101 }
2102 else {
2103 rb_vm_cc_table_delete(cc_tbl, mid);
2104 }
2105}
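/* Note (sketch): under multiple ractors the CC table is treated as
 * copy-on-write. Readers scan it without the VM lock, so eviction
 * duplicates the table, deletes the entry, and atomically publishes the
 * copy rather than mutating the shared table in place.
 */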
2106
2107static const struct rb_callcache *
2108vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2109{
2110 ASSERT_vm_locking();
2111
2112 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2113 const VALUE original_cc_table = cc_tbl;
2114 struct rb_class_cc_entries *ccs = NULL;
2115
2116 if (!cc_tbl) {
2117 cc_tbl = rb_vm_cc_table_create(1);
2118 }
2119 else if (rb_multi_ractor_p()) {
2120 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2121 }
2122
2123 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2124
2125 const rb_callable_method_entry_t *cme;
2126
2127 if (ccs) {
2128 cme = ccs->cme;
2129 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2130
2131 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2132 }
2133 else {
2134 cme = rb_callable_method_entry(klass, mid);
2135 }
2136
2137 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2138
2139 if (cme == NULL) {
2140 // undef or not found: can't cache the information
2141 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2142 return &vm_empty_cc;
2143 }
2144
2145 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2146
2147 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2148
2149 if (ccs == NULL) {
2150 VM_ASSERT(cc_tbl);
2151
2152 if (!LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs))) {
2153 // TODO: required?
2154 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2155 }
2156 }
2157
2158 cme = rb_check_overloaded_cme(cme, ci);
2159
2160 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2161 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2162
2163 VM_ASSERT(vm_cc_cme(cc) != NULL);
2164 VM_ASSERT(cme->called_id == mid);
2165 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2166
2167 if (original_cc_table != cc_tbl) {
2168 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2169 }
2170
2171 return cc;
2172}
2173
2174static const struct rb_callcache *
2175vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2176{
2177 VALUE cc_tbl;
2178 struct rb_class_cc_entries *ccs;
2179retry:
2180 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2181 ccs = NULL;
2182
2183 if (cc_tbl) {
2184 // CCS data is keyed on method id, so we don't need the method id
2185 // for doing comparisons in the `for` loop below.
2186
2187 if (rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
2188 const int ccs_len = ccs->len;
2189
2190 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2191 RB_VM_LOCKING() {
2192 vm_evict_cc(klass, cc_tbl, mid);
2193 }
2194 goto retry;
2195 }
2196 else {
2197 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2198
2199 // We already know the method id is correct because we had
2200 // to look up the ccs_data by method id. All we need to
2201 // compare is argc and flag
2202 unsigned int argc = vm_ci_argc(ci);
2203 unsigned int flag = vm_ci_flag(ci);
2204
2205 for (int i=0; i<ccs_len; i++) {
2206 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2207 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2208 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2209
2210 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2211
2212 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2213 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2214
2215 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2216 VM_ASSERT(ccs_cc->klass == klass);
2217 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2218
2219 return ccs_cc;
2220 }
2221 }
2222 }
2223 }
2224 }
2225
2226 RB_GC_GUARD(cc_tbl);
2227 return NULL;
2228}
2229
2230static const struct rb_callcache *
2231vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2232{
2233 const ID mid = vm_ci_mid(ci);
2234
2235 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2236 if (cc) {
2237 return cc;
2238 }
2239
2240 RB_VM_LOCKING() {
2241 if (rb_multi_ractor_p()) {
2242 // The CC may have been populated by another ractor while we were waiting on the lock,
2243 // so we must lookup a second time.
2244 cc = vm_lookup_cc(klass, ci, mid);
2245 }
2246
2247 if (!cc) {
2248 cc = vm_populate_cc(klass, ci, mid);
2249 }
2250 }
2251
2252 return cc;
2253}
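/* Note (sketch): vm_search_cc() is the classic double-checked pattern:
 * an optimistic lock-free lookup, a second lookup under the VM lock
 * (the cache may have been filled while we waited), and only then a
 * populate.
 */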
2254
2255const struct rb_callcache *
2256rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2257{
2258 const struct rb_callcache *cc;
2259
2260 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2261
2262 cc = vm_search_cc(klass, ci);
2263
2264 VM_ASSERT(cc);
2265 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2266 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2267 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2268 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2269 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2270
2271 return cc;
2272}
2273
2274static const struct rb_callcache *
2275vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2276{
2277#if USE_DEBUG_COUNTER
2278 const struct rb_callcache *old_cc = cd->cc;
2279#endif
2280
2281 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2282
2283#if OPT_INLINE_METHOD_CACHE
2284 cd->cc = cc;
2285
2286 const struct rb_callcache *empty_cc = &vm_empty_cc;
2287 if (cd_owner && cc != empty_cc) {
2288 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2289 }
2290
2291#if USE_DEBUG_COUNTER
2292 if (!old_cc || old_cc == empty_cc) {
2293 // empty
2294 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2295 }
2296 else if (old_cc == cc) {
2297 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2298 }
2299 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2300 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2301 }
2302 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2303 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2304 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2305 }
2306 else {
2307 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2308 }
2309#endif
2310#endif // OPT_INLINE_METHOD_CACHE
2311
2312 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2313 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2314
2315 return cc;
2316}
2317
2318ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2319static const struct rb_callcache *
2320vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2321{
2322 const struct rb_callcache *cc = cd->cc;
2323
2324#if OPT_INLINE_METHOD_CACHE
2325 if (LIKELY(vm_cc_class_check(cc, klass))) {
2326 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2327 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2328 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2329 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2330 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2331 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2332
2333 return cc;
2334 }
2335 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2336 }
2337 else {
2338 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2339 }
2340#endif
2341
2342 return vm_search_method_slowpath0(cd_owner, cd, klass);
2343}
2344
2345static const struct rb_callable_method_entry_struct *
2346vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2347{
2348 VALUE klass = CLASS_OF(recv);
2349 VM_ASSERT(klass != Qfalse);
2350 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2351
2352 const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
2353 return vm_cc_cme(cc);
2354}
2355
2356#if __has_attribute(transparent_union)
2357typedef union {
2358 VALUE (*anyargs)(ANYARGS);
2359 VALUE (*f00)(VALUE);
2360 VALUE (*f01)(VALUE, VALUE);
2361 VALUE (*f02)(VALUE, VALUE, VALUE);
2362 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2363 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2364 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2365 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2366 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2367 VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2368 VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2369 VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2370 VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2371 VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2372 VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2373 VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2374 VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2375 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2376} __attribute__((__transparent_union__)) cfunc_type;
2377# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2378#else
2379typedef VALUE (*cfunc_type)(ANYARGS);
2380# define make_cfunc_type(f) (cfunc_type)(f)
2381#endif
2382
2383static inline int
2384check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2385{
2386 if (! me) {
2387 return false;
2388 }
2389 else {
2390 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2391 VM_ASSERT(callable_method_entry_p(me));
2392 VM_ASSERT(me->def);
2393 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2394 return false;
2395 }
2396 else {
2397#if __has_attribute(transparent_union)
2398 return me->def->body.cfunc.func == func.anyargs;
2399#else
2400 return me->def->body.cfunc.func == func;
2401#endif
2402 }
2403 }
2404}
2405
2406static inline int
2407check_method_basic_definition(const rb_callable_method_entry_t *me)
2408{
2409 return me && METHOD_ENTRY_BASIC(me);
2410}
2411
2412static inline int
2413vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2414{
2415 VM_ASSERT(iseq != NULL);
2416 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
2417 return check_cfunc(cme, func);
2418}
2419
2420int
2421rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2422{
2423 return vm_method_cfunc_is(iseq, cd, recv, func);
2424}
2425
2426#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2427#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
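/* Illustrative usage (sketch, assuming the transparent-union build):
 * the wrappers above let callers pass a concrete C function without
 * casts, e.g.
 *
 *   if (check_cfunc(me, rb_obj_equal)) { ... }
 *
 * where make_cfunc_type() routes rb_obj_equal through the union (or a
 * plain function-pointer cast when the attribute is unavailable).
 */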
2428
2429#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2430
2431static inline bool
2432FIXNUM_2_P(VALUE a, VALUE b)
2433{
2434 /* FIXNUM_P(a) && FIXNUM_P(b)
2435 * == ((a & 1) && (b & 1))
2436 * == a & b & 1 */
2437 SIGNED_VALUE x = a;
2438 SIGNED_VALUE y = b;
2439 SIGNED_VALUE z = x & y & 1;
2440 return z == 1;
2441}
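/* Worked example: Fixnum n is tagged as (n << 1) | 1, so 2 is VALUE
 * 0b101 and 3 is 0b111; 0b101 & 0b111 & 1 == 1 (both Fixnums). A heap
 * object is an aligned pointer (low bit 0), so mixing one in yields 0.
 */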
2442
2443static inline bool
2444FLONUM_2_P(VALUE a, VALUE b)
2445{
2446#if USE_FLONUM
2447 /* FLONUM_P(a) && FLONUM_P(b)
2448 * == ((a & 3) == 2) && ((b & 3) == 2)
2449 * == !(((a ^ 2) | (b ^ 2)) & 3)
2450 */
2451 SIGNED_VALUE x = a;
2452 SIGNED_VALUE y = b;
2453 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2454 return !z;
2455#else
2456 return false;
2457#endif
2458}
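/* Worked example: a flonum's low two bits are 0b10, so (x ^ 2) clears
 * them exactly when x is a flonum. OR-ing both results and masking
 * with 3 is zero only if *both* operands carry the flonum tag.
 */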
2459
2460static VALUE
2461opt_equality_specialized(VALUE recv, VALUE obj)
2462{
2463 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2464 goto compare_by_identity;
2465 }
2466 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2467 goto compare_by_identity;
2468 }
2469 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2470 goto compare_by_identity;
2471 }
2472 else if (SPECIAL_CONST_P(recv)) {
2473 // other special constants: not specialized here (RBASIC_CLASS below would be invalid)
2474 }
2475 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2476 double a = RFLOAT_VALUE(recv);
2477 double b = RFLOAT_VALUE(obj);
2478
2479#if MSC_VERSION_BEFORE(1300)
2480 if (isnan(a)) {
2481 return Qfalse;
2482 }
2483 else if (isnan(b)) {
2484 return Qfalse;
2485 }
2486 else
2487#endif
2488 return RBOOL(a == b);
2489 }
2490 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2491 if (recv == obj) {
2492 return Qtrue;
2493 }
2494 else if (RB_TYPE_P(obj, T_STRING)) {
2495 return rb_str_eql_internal(obj, recv);
2496 }
2497 }
2498 return Qundef;
2499
2500 compare_by_identity:
2501 return RBOOL(recv == obj);
2502}
2503
2504static VALUE
2505opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2506{
2507 VM_ASSERT(cd_owner != NULL);
2508
2509 VALUE val = opt_equality_specialized(recv, obj);
2510 if (!UNDEF_P(val)) return val;
2511
2512 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2513 return Qundef;
2514 }
2515 else {
2516 return RBOOL(recv == obj);
2517 }
2518}
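/* Sketch of what the fast paths above cover, assuming the relevant
 * `==` methods are unredefined:
 *
 *   1 == 2       # FIXNUM_2_P: identity compare
 *   :a == :b     # static symbols: identity compare
 *   1.0 == 2.0   # flonum identity / heap Float double compare
 *   "a" == "b"   # String: rb_str_eql_internal
 *   obj == other # Qundef unless obj's #== is rb_obj_equal
 */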
2519
2520#undef EQ_UNREDEFINED_P
2521
2522static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2523NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2524
2525static VALUE
2526opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2527{
2528 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2529
2530 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2531 return RBOOL(recv == obj);
2532 }
2533 else {
2534 return Qundef;
2535 }
2536}
2537
2538static VALUE
2539opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2540{
2541 VALUE val = opt_equality_specialized(recv, obj);
2542 if (!UNDEF_P(val)) {
2543 return val;
2544 }
2545 else {
2546 return opt_equality_by_mid_slowpath(recv, obj, mid);
2547 }
2548}
2549
2550VALUE
2551rb_equal_opt(VALUE obj1, VALUE obj2)
2552{
2553 return opt_equality_by_mid(obj1, obj2, idEq);
2554}
2555
2556VALUE
2557rb_eql_opt(VALUE obj1, VALUE obj2)
2558{
2559 return opt_equality_by_mid(obj1, obj2, idEqlP);
2560}
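/* Note: both helpers return Qundef when the optimized paths cannot
 * answer the comparison; callers are expected to fall back to a full
 * method dispatch (e.g. rb_funcall) in that case.
 */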
2561
2562extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2563extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2564
2565static VALUE
2566check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2567{
2568 switch (type) {
2569 case VM_CHECKMATCH_TYPE_WHEN:
2570 return pattern;
2571 case VM_CHECKMATCH_TYPE_RESCUE:
2572 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2573 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2574 }
2575 /* fall through */
2576 case VM_CHECKMATCH_TYPE_CASE: {
2577 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2578 }
2579 default:
2580 rb_bug("check_match: unreachable");
2581 }
2582}
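/* Illustrative mapping (sketch):
 *
 *   case obj
 *   when pattern then ...   # VM_CHECKMATCH_TYPE_CASE: pattern === obj
 *   end
 *
 *   begin ... rescue pattern # VM_CHECKMATCH_TYPE_RESCUE: same ===,
 *   end                      # but pattern must be a class or module
 */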
2583
2584
2585#if MSC_VERSION_BEFORE(1300)
2586#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2587#else
2588#define CHECK_CMP_NAN(a, b) /* do nothing */
2589#endif
2590
2591static inline VALUE
2592double_cmp_lt(double a, double b)
2593{
2594 CHECK_CMP_NAN(a, b);
2595 return RBOOL(a < b);
2596}
2597
2598static inline VALUE
2599double_cmp_le(double a, double b)
2600{
2601 CHECK_CMP_NAN(a, b);
2602 return RBOOL(a <= b);
2603}
2604
2605static inline VALUE
2606double_cmp_gt(double a, double b)
2607{
2608 CHECK_CMP_NAN(a, b);
2609 return RBOOL(a > b);
2610}
2611
2612static inline VALUE
2613double_cmp_ge(double a, double b)
2614{
2615 CHECK_CMP_NAN(a, b);
2616 return RBOOL(a >= b);
2617}
2618
2619// Copied by vm_dump.c
2620static inline VALUE *
2621vm_base_ptr(const rb_control_frame_t *cfp)
2622{
2623 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2624
2625 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2626 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2627
2628 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2629 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2630 int params = ISEQ_BODY(cfp->iseq)->param.size;
2631
2632 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2633 bp += vm_ci_argc(ci);
2634 }
2635
2636 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2637 /* adjust `self' */
2638 bp += 1;
2639 }
2640#if VM_DEBUG_BP_CHECK
2641 if (bp != cfp->bp_check) {
2642 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2643 (long)(cfp->bp_check - GET_EC()->vm_stack),
2644 (long)(bp - GET_EC()->vm_stack));
2645 rb_bug("vm_base_ptr: unreachable");
2646 }
2647#endif
2648 return bp;
2649 }
2650 else {
2651 return NULL;
2652 }
2653}
2654
2655VALUE *
2656rb_vm_base_ptr(const rb_control_frame_t *cfp)
2657{
2658 return vm_base_ptr(cfp);
2659}
2660
2661/* method call processes with call_info */
2662
2663#include "vm_args.c"
2664
2665static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2666ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2667static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2668static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2669static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2670static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2671static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2672
2673static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2674
2675static VALUE
2676vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2677{
2678 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2679
2680 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2681}
2682
2683static VALUE
2684vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2685{
2686 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2687
2688 const struct rb_callcache *cc = calling->cc;
2689 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2690 int param = ISEQ_BODY(iseq)->param.size;
2691 int local = ISEQ_BODY(iseq)->local_table_size;
2692 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2693}
2694
2695bool
2696rb_simple_iseq_p(const rb_iseq_t *iseq)
2697{
2698 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2699 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2700 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2701 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2702 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2703 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2704 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2705 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2706}
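/* Examples (sketch): `def foo(a, b); end` is "simple" (fixed positional
 * parameters only), while `def foo(a, b = 1)`, `def foo(*a)`,
 * `def foo(k:)` and `def foo(&b)` each trip one of the flags above.
 */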
2707
2708bool
2709rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2710{
2711 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2712 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2713 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2714 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2715 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2716 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2717 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2718 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2719}
2720
2721bool
2722rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2723{
2724 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2725 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2726 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2727 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2728 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2729 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2730 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2731}
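/* Examples (sketch): `def foo(a, b = 1, c = 2); end` satisfies
 * rb_iseq_only_optparam_p, and `def foo(a, k: 1); end` satisfies
 * rb_iseq_only_kwparam_p; each shape gets its own specialized setup
 * path below.
 */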
2732
2733#define ALLOW_HEAP_ARGV (-2)
2734#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2735
2736static inline bool
2737vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2738{
2739 vm_check_canary(GET_EC(), cfp->sp);
2740 bool ret = false;
2741
2742 if (!NIL_P(ary)) {
2743 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2744 long len = RARRAY_LEN(ary);
2745 int argc = calling->argc;
2746
2747 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2748 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2749 * a temporary array, instead of trying to keep arguments on the VM stack.
2750 */
2751 VALUE *argv = cfp->sp - argc;
2752 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2753 rb_ary_cat(argv_ary, argv, argc);
2754 rb_ary_cat(argv_ary, ptr, len);
2755 cfp->sp -= argc - 1;
2756 cfp->sp[-1] = argv_ary;
2757 calling->argc = 1;
2758 calling->heap_argv = argv_ary;
2759 RB_GC_GUARD(ary);
2760 }
2761 else {
2762 long i;
2763
2764 if (max_args >= 0 && len + argc > max_args) {
2765 /* If only a given max_args is allowed, copy up to max args.
2766 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2767 * where additional arguments are ignored.
2768 *
2769 * Also, copy up to one more argument than the maximum,
2770 * in case it is an empty keyword hash that will be removed.
2771 */
2772 calling->argc += len - (max_args - argc + 1);
2773 len = max_args - argc + 1;
2774 ret = true;
2775 }
2776 else {
2777 /* Unset heap_argv if it was set originally. This can happen
2778 * when forwarding modified arguments: heap_argv was used for
2779 * the original call, but the forwarded method does not
2780 * support heap_argv in all cases.
2781 */
2782 calling->heap_argv = 0;
2783 }
2784 CHECK_VM_STACK_OVERFLOW(cfp, len);
2785
2786 for (i = 0; i < len; i++) {
2787 *cfp->sp++ = ptr[i];
2788 }
2789 calling->argc += i;
2790 }
2791 }
2792
2793 return ret;
2794}
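/* Sketch: `f(*ary)` with a huge `ary` (len + argc > VM_ARGC_STACK_MAX)
 * and max_args <= ALLOW_HEAP_ARGV collapses the arguments into one
 * hidden Array (calling->heap_argv) instead of pushing thousands of
 * VALUEs, avoiding a SystemStackError.
 */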
2795
2796static inline void
2797vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2798{
2799 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2800 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2801 const VALUE h = rb_hash_new_with_size(kw_len);
2802 VALUE *sp = cfp->sp;
2803 int i;
2804
2805 for (i=0; i<kw_len; i++) {
2806 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2807 }
2808 (sp-kw_len)[0] = h;
2809
2810 cfp->sp -= kw_len - 1;
2811 calling->argc -= kw_len - 1;
2812 calling->kw_splat = 1;
2813}
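/* Sketch: for `f(k1: 1, k2: 2)` the stack top holds 1 and 2; the loop
 * above folds them into {k1: 1, k2: 2} and stores the hash where the
 * first keyword value sat, so argc shrinks by kw_len - 1 and kw_splat
 * is set.
 */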
2814
2815static inline VALUE
2816vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2817{
2818 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2819 if (keyword_hash != Qnil) {
2820 /* Convert a non-hash keyword splat to a new hash */
2821 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2822 }
2823 }
2824 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2825 /* Convert a hash keyword splat to a new hash unless
2826 * a mutable keyword splat was passed.
2827 * Skip allocating new hash for empty keyword splat, as empty
2828 * keyword splat will be ignored by both callers.
2829 */
2830 keyword_hash = rb_hash_dup(keyword_hash);
2831 }
2832 return keyword_hash;
2833}
2834
2835static inline void
2836CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2837 struct rb_calling_info *restrict calling,
2838 const struct rb_callinfo *restrict ci, int max_args)
2839{
2840 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2841 if (IS_ARGS_KW_SPLAT(ci)) {
2842 // f(*a, **kw)
2843 VM_ASSERT(calling->kw_splat == 1);
2844
2845 cfp->sp -= 2;
2846 calling->argc -= 2;
2847 VALUE ary = cfp->sp[0];
2848 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2849
2850 // splat a
2851 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2852
2853 // put kw
2854 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2855 if (UNLIKELY(calling->heap_argv)) {
2856 rb_ary_push(calling->heap_argv, kwh);
2857 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2858 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2859 calling->kw_splat = 0;
2860 }
2861 }
2862 else {
2863 cfp->sp[0] = kwh;
2864 cfp->sp++;
2865 calling->argc++;
2866
2867 VM_ASSERT(calling->kw_splat == 1);
2868 }
2869 }
2870 else {
2871 calling->kw_splat = 0;
2872 }
2873 }
2874 else {
2875 // f(*a)
2876 VM_ASSERT(calling->kw_splat == 0);
2877
2878 cfp->sp -= 1;
2879 calling->argc -= 1;
2880 VALUE ary = cfp->sp[0];
2881
2882 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2883 goto check_keyword;
2884 }
2885
2886 // check the last argument
2887 VALUE last_hash, argv_ary;
2888 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2889 if (!IS_ARGS_KEYWORD(ci) &&
2890 RARRAY_LEN(argv_ary) > 0 &&
2891 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2892 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2893
2894 rb_ary_pop(argv_ary);
2895 if (!RHASH_EMPTY_P(last_hash)) {
2896 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2897 calling->kw_splat = 1;
2898 }
2899 }
2900 }
2901 else {
2902check_keyword:
2903 if (!IS_ARGS_KEYWORD(ci) &&
2904 calling->argc > 0 &&
2905 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2906 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2907
2908 if (RHASH_EMPTY_P(last_hash)) {
2909 calling->argc--;
2910 cfp->sp -= 1;
2911 }
2912 else {
2913 cfp->sp[-1] = rb_hash_dup(last_hash);
2914 calling->kw_splat = 1;
2915 }
2916 }
2917 }
2918 }
2919 }
2920 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2921 // f(**kw)
2922 VM_ASSERT(calling->kw_splat == 1);
2923 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2924
2925 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2926 cfp->sp--;
2927 calling->argc--;
2928 calling->kw_splat = 0;
2929 }
2930 else {
2931 cfp->sp[-1] = kwh;
2932 }
2933 }
2934 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2935 // f(k1:1, k2:2)
2936 VM_ASSERT(calling->kw_splat == 0);
2937
2938 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2939 * by creating a keyword hash.
2940 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2941 */
2942 vm_caller_setup_arg_kw(cfp, calling, ci);
2943 }
2944}
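/* Summary of the call shapes normalized above (sketch):
 *
 *   f(*a)        # splat expanded onto the stack (or heap_argv)
 *   f(*a, **kw)  # splat expanded, kw hash re-pushed if non-empty
 *   f(**kw)      # empty kw splat dropped, otherwise hash normalized
 *   f(k1: 1)     # VM_CALL_KWARG folded into a keyword hash
 */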
2945
2946#define USE_OPT_HIST 0
2947
2948#if USE_OPT_HIST
2949#define OPT_HIST_MAX 64
2950static int opt_hist[OPT_HIST_MAX+1];
2951
2952__attribute__((destructor))
2953static void
2954opt_hist_show_results_at_exit(void)
2955{
2956 for (int i=0; i<OPT_HIST_MAX; i++) {
2957 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2958 }
2959}
2960#endif
2961
2962static VALUE
2963vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2964 struct rb_calling_info *calling)
2965{
2966 const struct rb_callcache *cc = calling->cc;
2967 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2968 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2969 const int opt = calling->argc - lead_num;
2970 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2971 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2972 const int param = ISEQ_BODY(iseq)->param.size;
2973 const int local = ISEQ_BODY(iseq)->local_table_size;
2974 const int delta = opt_num - opt;
2975
2976 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2977
2978#if USE_OPT_HIST
2979 if (opt_pc < OPT_HIST_MAX) {
2980 opt_hist[opt]++;
2981 }
2982 else {
2983 opt_hist[OPT_HIST_MAX]++;
2984 }
2985#endif
2986
2987 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2988}
2989
2990static VALUE
2991vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2992 struct rb_calling_info *calling)
2993{
2994 const struct rb_callcache *cc = calling->cc;
2995 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2996 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2997 const int opt = calling->argc - lead_num;
2998 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2999
3000 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3001
3002#if USE_OPT_HIST
3003 if (opt_pc < OPT_HIST_MAX) {
3004 opt_hist[opt]++;
3005 }
3006 else {
3007 opt_hist[OPT_HIST_MAX]++;
3008 }
3009#endif
3010
3011 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3012}
3013
3014static void
3015args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3016 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3017 VALUE *const locals);
3018
3019static VALUE
3020vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3021 struct rb_calling_info *calling)
3022{
3023 const struct rb_callcache *cc = calling->cc;
3024 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3025 int param_size = ISEQ_BODY(iseq)->param.size;
3026 int local_size = ISEQ_BODY(iseq)->local_table_size;
3027
3028 // Setting up local size and param size
3029 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3030
3031 local_size = local_size + vm_ci_argc(calling->cd->ci);
3032 param_size = param_size + vm_ci_argc(calling->cd->ci);
3033
3034 cfp->sp[0] = (VALUE)calling->cd->ci;
3035
3036 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
3037}
3038
3039static VALUE
3040vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3041 struct rb_calling_info *calling)
3042{
3043 const struct rb_callinfo *ci = calling->cd->ci;
3044 const struct rb_callcache *cc = calling->cc;
3045
3046 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
3047 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
3048
3049 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3050 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3051 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3052 const int ci_kw_len = kw_arg->keyword_len;
3053 const VALUE * const ci_keywords = kw_arg->keywords;
3054 VALUE *argv = cfp->sp - calling->argc;
3055 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3056 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3057 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3058 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3059 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3060
3061 int param = ISEQ_BODY(iseq)->param.size;
3062 int local = ISEQ_BODY(iseq)->local_table_size;
3063 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3064}
3065
3066static VALUE
3067vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3068 struct rb_calling_info *calling)
3069{
3070 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3071 const struct rb_callcache *cc = calling->cc;
3072
3073 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3074 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3075
3076 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3077 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3078 VALUE * const argv = cfp->sp - calling->argc;
3079 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3080
3081 int i;
3082 for (i=0; i<kw_param->num; i++) {
3083 klocals[i] = kw_param->default_values[i];
3084 }
3085 klocals[i] = INT2FIX(0); // kw specify flag
3086 // NOTE:
3087 // nobody checks this value, but it should be cleared because it can
3088 // point to an invalid VALUE (T_NONE objects, raw pointers, and so on).
3089
3090 int param = ISEQ_BODY(iseq)->param.size;
3091 int local = ISEQ_BODY(iseq)->local_table_size;
3092 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3093}
3094
3095static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3096
3097static VALUE
3098vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3099 struct rb_calling_info *calling)
3100{
3101 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3102 cfp->sp -= (calling->argc + 1);
3103 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3104 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3105}
3106
3107VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3108
3109static void
3110warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3111{
3112 rb_vm_t *vm = GET_VM();
3113 set_table *dup_check_table = vm->unused_block_warning_table;
3114 st_data_t key;
3115 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3116
3117 union {
3118 VALUE v;
3119 unsigned char b[SIZEOF_VALUE];
3120 } k1 = {
3121 .v = (VALUE)pc,
3122 }, k2 = {
3123 .v = (VALUE)cme->def,
3124 };
3125
3126 // relaxed check
3127 if (!strict_unused_block) {
3128 key = (st_data_t)cme->def->original_id;
3129
3130 if (set_table_lookup(dup_check_table, key)) {
3131 return;
3132 }
3133 }
3134
3135 // strict check
3136 // make unique key from pc and me->def pointer
3137 key = 0;
3138 for (int i=0; i<SIZEOF_VALUE; i++) {
3139 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3140 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3141 }
3142
3143 if (0) {
3144 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3145 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3146 fprintf(stderr, "key:%p\n", (void *)key);
3147 }
3148
3149 // duplication check
3150 if (set_insert(dup_check_table, key)) {
3151 // already shown
3152 }
3153 else if (RTEST(ruby_verbose) || strict_unused_block) {
3154 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3155 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3156
3157 if (!NIL_P(m_loc)) {
3158 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3159 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3160 }
3161 else {
3162 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3163 }
3164 }
3165}
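/* Example trigger (sketch), with -w or
 * Warning[:strict_unused_block] = true:
 *
 *   def foo; end   # body never uses yield or a block parameter
 *   foo { :ignored }
 *   # => warning: the block passed to 'foo' ... may be ignored
 */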
3166
3167static inline int
3168vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3169 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3170{
3171 const struct rb_callinfo *ci = calling->cd->ci;
3172 const struct rb_callcache *cc = calling->cc;
3173
3174 VM_ASSERT((vm_ci_argc(ci), 1));
3175 VM_ASSERT(vm_cc_cme(cc) != NULL);
3176
3177 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3178 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3179 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3180 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3181 }
3182
3183 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3184 if (LIKELY(rb_simple_iseq_p(iseq))) {
3185 rb_control_frame_t *cfp = ec->cfp;
3186 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3187 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3188
3189 if (calling->argc != lead_num) {
3190 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3191 }
3192
3193 //VM_ASSERT(ci == calling->cd->ci);
3194 VM_ASSERT(cc == calling->cc);
3195
3196 if (vm_call_iseq_optimizable_p(ci, cc)) {
3197 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3198 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3199 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3200 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3201 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3202 }
3203 else {
3204 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3205 }
3206 }
3207 return 0;
3208 }
3209 else if (rb_iseq_only_optparam_p(iseq)) {
3210 rb_control_frame_t *cfp = ec->cfp;
3211
3212 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3213 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3214
3215 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3216 const int argc = calling->argc;
3217 const int opt = argc - lead_num;
3218
3219 if (opt < 0 || opt > opt_num) {
3220 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3221 }
3222
3223 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3224 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3225 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3226 vm_call_cacheable(ci, cc));
3227 }
3228 else {
3229 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3230 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3231 vm_call_cacheable(ci, cc));
3232 }
3233
3234 /* initialize opt vars for self-references */
3235 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3236 for (int i=argc; i<lead_num + opt_num; i++) {
3237 argv[i] = Qnil;
3238 }
3239 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3240 }
3241 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3242 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3243 const int argc = calling->argc;
3244 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3245
3246 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3247 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3248
3249 if (argc - kw_arg->keyword_len == lead_num) {
3250 const int ci_kw_len = kw_arg->keyword_len;
3251 const VALUE * const ci_keywords = kw_arg->keywords;
3252 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3253 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3254
3255 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3256 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3257
3258 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3259 vm_call_cacheable(ci, cc));
3260
3261 return 0;
3262 }
3263 }
3264 else if (argc == lead_num) {
3265 /* no kwarg */
3266 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3267 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3268
3269 if (klocals[kw_param->num] == INT2FIX(0)) {
3270 /* copy from default_values */
3271 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3272 vm_call_cacheable(ci, cc));
3273 }
3274
3275 return 0;
3276 }
3277 }
3278 }
3279
3280 // Called iseq is using ... param
3281 // def foo(...) # <- iseq for foo will have "forwardable"
3282 //
3283 // We want to set the `...` local to the caller's CI
3284 // foo(1, 2) # <- the ci for this should end up as `...`
3285 //
3286 // So hopefully the stack looks like:
3287 //
3288 // => 1
3289 // => 2
3290 // => *
3291 // => **
3292 // => &
3293 // => ... # <- points at `foo`s CI
3294 // => cref_or_me
3295 // => specval
3296 // => type
3297 //
3298 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3299 bool can_fastpath = true;
3300
3301 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3302 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3303 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3304 ci = vm_ci_new_runtime(
3305 vm_ci_mid(ci),
3306 vm_ci_flag(ci),
3307 vm_ci_argc(ci),
3308 vm_ci_kwarg(ci));
3309 }
3310 else {
3311 ci = forward_cd->caller_ci;
3312 }
3313 can_fastpath = false;
3314 }
3315 // C functions calling iseqs will stack allocate a CI,
3316 // so we need to convert it to a heap-allocated one
3317 if (!vm_ci_markable(ci)) {
3318 ci = vm_ci_new_runtime(
3319 vm_ci_mid(ci),
3320 vm_ci_flag(ci),
3321 vm_ci_argc(ci),
3322 vm_ci_kwarg(ci));
3323 can_fastpath = false;
3324 }
3325 argv[param_size - 1] = (VALUE)ci;
3326 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3327 return 0;
3328 }
3329
3330 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3331}
3332
3333static void
3334vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3335{
3336 // This case is when the caller is using a ... parameter.
3337 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3338 // In this case the caller's caller's CI will be on the stack.
3339 //
3340 // For example:
3341 //
3342 // def bar(a, b); a + b; end
3343 // def foo(...); bar(...); end
3344 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3345 //
3346 // Stack layout will be:
3347 //
3348 // > 1
3349 // > 2
3350 // > CI for foo(1, 2)
3351 // > cref_or_me
3352 // > specval
3353 // > type
3354 // > receiver
3355 // > CI for foo(1, 2), via `getlocal ...`
3356 // > ( SP points here )
3357 const VALUE * lep = VM_CF_LEP(cfp);
3358
3359 const rb_iseq_t *iseq;
3360
3361 // If we're in an escaped environment (lambda for example), get the iseq
3362 // from the captured env.
3363 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3364 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3365 iseq = env->iseq;
3366 }
3367 else { // Otherwise use the lep to find the caller
3368 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3369 }
3370
3371 // Our local storage is below the args we need to copy
3372 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3373
3374 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3375 VALUE * to = cfp->sp - 1; // clobber the CI
3376
3377 if (RTEST(splat)) {
3378 to -= 1; // clobber the splat array
3379 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3380 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3381 to += RARRAY_LEN(splat);
3382 }
3383
3384 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3385 MEMCPY(to, from, VALUE, argc);
3386 cfp->sp = to + argc;
3387
3388 // Stack layout should now be:
3389 //
3390 // > 1
3391 // > 2
3392 // > CI for foo(1, 2)
3393 // > cref_or_me
3394 // > specval
3395 // > type
3396 // > receiver
3397 // > 1
3398 // > 2
3399 // > ( SP points here )
3400}
3401
3402static VALUE
3403vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3404{
3405 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3406
3407 const struct rb_callcache *cc = calling->cc;
3408 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3409 int param_size = ISEQ_BODY(iseq)->param.size;
3410 int local_size = ISEQ_BODY(iseq)->local_table_size;
3411
3412 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3413
3414 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3415 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3416}
3417
3418static VALUE
3419vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3420{
3421 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3422
3423 const struct rb_callcache *cc = calling->cc;
3424 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3425 int param_size = ISEQ_BODY(iseq)->param.size;
3426 int local_size = ISEQ_BODY(iseq)->local_table_size;
3427
3428 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3429
3430 // Setting up local size and param size
3431 local_size = local_size + vm_ci_argc(calling->cd->ci);
3432 param_size = param_size + vm_ci_argc(calling->cd->ci);
3433
3434 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3435 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3436}
3437
3438static inline VALUE
3439vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3440 int opt_pc, int param_size, int local_size)
3441{
3442 const struct rb_callinfo *ci = calling->cd->ci;
3443 const struct rb_callcache *cc = calling->cc;
3444
3445 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3446 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3447 }
3448 else {
3449 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3450 }
3451}
3452
3453static inline VALUE
3454vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3455 int opt_pc, int param_size, int local_size)
3456{
3457 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3458 VALUE *argv = cfp->sp - calling->argc;
3459 VALUE *sp = argv + param_size;
3460 cfp->sp = argv - 1 /* recv */;
3461
3462 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3463 calling->block_handler, (VALUE)me,
3464 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3465 local_size - param_size,
3466 ISEQ_BODY(iseq)->stack_max);
3467 return Qundef;
3468}
3469
3470static inline VALUE
3471vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3472{
3473 const struct rb_callcache *cc = calling->cc;
3474 unsigned int i;
3475 VALUE *argv = cfp->sp - calling->argc;
3476 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3477 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3478 VALUE *src_argv = argv;
3479 VALUE *sp_orig, *sp;
3480 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3481
3482 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3483 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3484 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3485 dst_captured->code.val = src_captured->code.val;
3486 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3487 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3488 }
3489 else {
3490 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3491 }
3492 }
3493
3494 vm_pop_frame(ec, cfp, cfp->ep);
3495 cfp = ec->cfp;
3496
3497 sp_orig = sp = cfp->sp;
3498
3499 /* push self */
3500 sp[0] = calling->recv;
3501 sp++;
3502
3503 /* copy arguments */
3504 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3505 *sp++ = src_argv[i];
3506 }
3507
3508 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3509 calling->recv, calling->block_handler, (VALUE)me,
3510 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3511 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3512 ISEQ_BODY(iseq)->stack_max);
3513
3514 cfp->sp = sp_orig;
3515
3516 return Qundef;
3517}
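/* Note (sketch): this path only runs for iseqs compiled with the
 * off-by-default tailcall optimization, e.g.
 *
 *   RubyVM::InstructionSequence.compile_option = {
 *     tailcall_optimization: true, trace_instruction: false
 *   }
 *
 * The caller's frame is popped first, so the callee replaces it rather
 * than growing the VM stack.
 */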
3518
3519static void
3520ractor_unsafe_check(void)
3521{
3522 if (!rb_ractor_main_p()) {
3523 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
3524 }
3525}
3526
3527static VALUE
3528call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3529{
3530 ractor_unsafe_check();
3531 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3532 return (*f)(recv, rb_ary_new4(argc, argv));
3533}
3534
3535static VALUE
3536call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3537{
3538 ractor_unsafe_check();
3539 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3540 return (*f)(argc, argv, recv);
3541}
3542
3543static VALUE
3544call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3545{
3546 ractor_unsafe_check();
3547 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3548 return (*f)(recv);
3549}
3550
3551static VALUE
3552call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3553{
3554 ractor_unsafe_check();
3555 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3556 return (*f)(recv, argv[0]);
3557}
3558
3559static VALUE
3560call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3561{
3562 ractor_unsafe_check();
3563 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3564 return (*f)(recv, argv[0], argv[1]);
3565}
3566
3567static VALUE
3568call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3569{
3570 ractor_unsafe_check();
3571 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3572 return (*f)(recv, argv[0], argv[1], argv[2]);
3573}
3574
3575static VALUE
3576call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3577{
3578 ractor_unsafe_check();
3579 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3580 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3581}
3582
3583static VALUE
3584call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3585{
3586 ractor_unsafe_check();
3587 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3588 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3589}
3590
3591static VALUE
3592call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3593{
3594 ractor_unsafe_check();
3595 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3596 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3597}
3598
3599static VALUE
3600call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3601{
3602 ractor_unsafe_check();
3603 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3604 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3605}
3606
3607static VALUE
3608call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3609{
3610 ractor_unsafe_check();
3611 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3612 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3613}
3614
3615static VALUE
3616call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3617{
3618 ractor_unsafe_check();
3619 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3620 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3621}
3622
3623static VALUE
3624call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3625{
3626 ractor_unsafe_check();
3627 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3628 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3629}
3630
3631static VALUE
3632call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3633{
3634 ractor_unsafe_check();
3635 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3636 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3637}
3638
3639static VALUE
3640call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3641{
3642 ractor_unsafe_check();
3643 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3644 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3645}
3646
3647static VALUE
3648call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3649{
3650 ractor_unsafe_check();
3651 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3652 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3653}
3654
3655static VALUE
3656call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657{
3658 ractor_unsafe_check();
3659 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3660 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3661}
3662
3663static VALUE
3664call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3665{
3666 ractor_unsafe_check();
3667 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3668 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3669}
3670
3671static VALUE
3672ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3673{
3674 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3675 return (*f)(recv, rb_ary_new4(argc, argv));
3676}
3677
3678static VALUE
3679ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3680{
3681 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3682 return (*f)(argc, argv, recv);
3683}
3684
3685static VALUE
3686ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3687{
3688 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3689 return (*f)(recv);
3690}
3691
3692static VALUE
3693ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3694{
3695 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3696 return (*f)(recv, argv[0]);
3697}
3698
3699static VALUE
3700ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3701{
3702 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3703 return (*f)(recv, argv[0], argv[1]);
3704}
3705
3706static VALUE
3707ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3708{
3709 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3710 return (*f)(recv, argv[0], argv[1], argv[2]);
3711}
3712
3713static VALUE
3714ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3715{
3716 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3717 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3718}
3719
3720static VALUE
3721ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3722{
3723 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3724 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3725}
3726
3727static VALUE
3728ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3729{
3730 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3731 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3732}
3733
3734static VALUE
3735ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3736{
3737 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3738 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3739}
3740
3741static VALUE
3742ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3743{
3744 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3745 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3746}
3747
3748static VALUE
3749ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3750{
3751 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3752 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3753}
3754
3755static VALUE
3756ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3757{
3758 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3759 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3760}
3761
3762static VALUE
3763ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3764{
3765 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3766 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3767}
3768
3769static VALUE
3770ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3771{
3772 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3773 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3774}
3775
3776static VALUE
3777ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3778{
3779 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3780 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3781}
3782
3783static VALUE
3784ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3785{
3786 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3787 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3788}
3789
3790static VALUE
3791ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3792{
3793 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3794 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3795}
3796
3797static inline int
3798vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3799{
3800 const int ov_flags = RAISED_STACKOVERFLOW;
3801 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3802 if (rb_ec_raised_p(ec, ov_flags)) {
3803 rb_ec_raised_reset(ec, ov_flags);
3804 return TRUE;
3805 }
3806 return FALSE;
3807}
3808
3809#define CHECK_CFP_CONSISTENCY(func) \
3810 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3811 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
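
// Consistency here means: after the cfunc body has returned, the frame pushed
// for it must still be the top of the control frame stack, i.e. the caller's
// reg_cfp equals ec->cfp + 1 (control frames grow toward lower addresses).
// The one tolerated exception is a pending stack-overflow state, which is
// reset instead of being reported as a cfp mismatch.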
3812
3813static inline
3814const rb_method_cfunc_t *
3815vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3816{
3817#if VM_DEBUG_VERIFY_METHOD_CACHE
3818 switch (me->def->type) {
3819 case VM_METHOD_TYPE_CFUNC:
3820 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3821 break;
3822# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3823 METHOD_BUG(ISEQ);
3824 METHOD_BUG(ATTRSET);
3825 METHOD_BUG(IVAR);
3826 METHOD_BUG(BMETHOD);
3827 METHOD_BUG(ZSUPER);
3828 METHOD_BUG(UNDEF);
3829 METHOD_BUG(OPTIMIZED);
3830 METHOD_BUG(MISSING);
3831 METHOD_BUG(REFINED);
3832 METHOD_BUG(ALIAS);
3833# undef METHOD_BUG
3834 default:
3835 rb_bug("wrong method type: %d", me->def->type);
3836 }
3837#endif
3838 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3839}
3840
3841static VALUE
3842vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3843 int argc, VALUE *argv, VALUE *stack_bottom)
3844{
3845 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3846 const struct rb_callinfo *ci = calling->cd->ci;
3847 const struct rb_callcache *cc = calling->cc;
3848 VALUE val;
3849 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3850 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3851
3852 VALUE recv = calling->recv;
3853 VALUE block_handler = calling->block_handler;
3854 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3855
3856 if (UNLIKELY(calling->kw_splat)) {
3857 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3858 }
3859
3860 VM_ASSERT(reg_cfp == ec->cfp);
3861
3862 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3863 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3864
3865 vm_push_frame(ec, NULL, frame_type, recv,
3866 block_handler, (VALUE)me,
3867 0, ec->cfp->sp, 0, 0);
3868
3869 int len = cfunc->argc;
3870 if (len >= 0) rb_check_arity(argc, len, len);
3871
3872 reg_cfp->sp = stack_bottom;
3873 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3874
3875 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3876
3877 rb_vm_pop_frame(ec);
3878
3879 VM_ASSERT(ec->cfp->sp == stack_bottom);
3880
3881 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3882 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3883
3884 return val;
3885}
3886
3887// Push a C method frame for a given cme. This is called when JIT code skipped
3888// pushing a frame but the C method reached a point where a frame is needed.
3889void
3890rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3891{
3892 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3893 rb_execution_context_t *ec = GET_EC();
3894 VALUE *sp = ec->cfp->sp;
3895 VALUE recv = *(sp - recv_idx - 1);
3896 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3897 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3898#if VM_CHECK_MODE > 0
3899 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3900 *(GET_EC()->cfp->sp) = Qfalse;
3901#endif
3902 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3903}
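
// recv_idx above is the number of stack slots sitting on top of the receiver;
// e.g. with recv_idx == 2 the layout is sp[-3] = recv, sp[-2] = arg0,
// sp[-1] = arg1, matching recv = *(sp - recv_idx - 1).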
3904
3905// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3906bool
3907rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3908{
3909 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3910}
3911
3912static VALUE
3913vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3914{
3915 int argc = calling->argc;
3916 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3917 VALUE *argv = &stack_bottom[1];
3918
3919 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3920}
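
/* Stack layout assumed above, e.g. when argc == 2:
 *
 *     sp[-1]  arg1
 *     sp[-2]  arg0  <- argv
 *     sp[-3]  recv  <- stack_bottom
 *
 * The receiver sits directly below the arguments, so argv is &stack_bottom[1]
 * and resetting sp to stack_bottom pops both the arguments and the receiver.
 */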
3921
3922static VALUE
3923vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3924{
3925 const struct rb_callinfo *ci = calling->cd->ci;
3926 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3927
3928 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3929 VALUE argv_ary;
3930 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3931 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3932 int argc = RARRAY_LENINT(argv_ary);
3933 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3934 VALUE *stack_bottom = reg_cfp->sp - 2;
3935
3936 VM_ASSERT(calling->argc == 1);
3937 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3938 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3939
3940 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3941 }
3942 else {
3943 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3944
3945 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3946 }
3947}
3948
3949static inline VALUE
3950vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3951{
3952 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3953 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3954
3955 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3956 return vm_call_cfunc_other(ec, reg_cfp, calling);
3957 }
3958
3959 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3960 calling->kw_splat = 0;
3961 int i;
3962 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3963 VALUE *sp = stack_bottom;
3964 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3965 for (i = 0; i < argc; i++) {
3966 *++sp = argv[i];
3967 }
3968 reg_cfp->sp = sp+1;
3969
3970 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3971}
3972
3973static inline VALUE
3974vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3975{
3976 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3977 VALUE argv_ary = reg_cfp->sp[-1];
3978 int argc = RARRAY_LENINT(argv_ary);
3979 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3980 VALUE last_hash;
3981 int argc_offset = 0;
3982
3983 if (UNLIKELY(argc > 0 &&
3984 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3985 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3986 if (!RHASH_EMPTY_P(last_hash)) {
3987 return vm_call_cfunc_other(ec, reg_cfp, calling);
3988 }
3989 argc_offset++;
3990 }
3991 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3992}
3993
3994static inline VALUE
3995vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3996{
3997 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3998 VALUE keyword_hash = reg_cfp->sp[-1];
3999
4000 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4001 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4002 }
4003
4004 return vm_call_cfunc_other(ec, reg_cfp, calling);
4005}
4006
4007static VALUE
4008vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4009{
4010 const struct rb_callinfo *ci = calling->cd->ci;
4011 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4012
4013 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4014 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4015 // f(*a)
4016 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4017 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4018 }
4019 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4020 // f(*a, **kw)
4021 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4022 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4023 }
4024 }
4025
4026 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4027 return vm_call_cfunc_other(ec, reg_cfp, calling);
4028}
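
// Call shapes and the fastpath each one settles on (illustrative):
//
//     obj.f(*args)          # vm_call_cfunc_only_splat
//     obj.f(*args, **opts)  # vm_call_cfunc_only_splat_kw
//     obj.f(1, 2)           # vm_call_cfunc_other, which then installs
//                           # vm_call_cfunc_with_frame for simple arguments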
4029
4030static VALUE
4031vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4032{
4033 const struct rb_callcache *cc = calling->cc;
4034 RB_DEBUG_COUNTER_INC(ccf_ivar);
4035 cfp->sp -= 1;
4036 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4037 return ivar;
4038}
4039
4040static VALUE
4041vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
4042{
4043 RB_DEBUG_COUNTER_INC(ccf_attrset);
4044 VALUE val = *(cfp->sp - 1);
4045 cfp->sp -= 2;
4046 attr_index_t index;
4047 shape_id_t dest_shape_id;
4048 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
4049 ID id = vm_cc_cme(cc)->def->body.attr.id;
4050 rb_check_frozen(obj);
4051 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4052 if (UNDEF_P(res)) {
4053 switch (BUILTIN_TYPE(obj)) {
4054 case T_OBJECT:
4055 break;
4056 case T_CLASS:
4057 case T_MODULE:
4058 {
4059 res = vm_setivar_class(obj, id, val, dest_shape_id, index);
4060 if (!UNDEF_P(res)) {
4061 return res;
4062 }
4063 }
4064 break;
4065 default:
4066 {
4067 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4068 if (!UNDEF_P(res)) {
4069 return res;
4070 }
4071 }
4072 }
4073 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4074 }
4075 return res;
4076}
4077
4078static VALUE
4079vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4080{
4081 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4082}
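
// vm_call_ivar and vm_call_attrset serve the methods generated by attr_reader
// and attr_writer. Illustrative Ruby (not from this file):
//
//     class Point
//       attr_accessor :x
//     end
//     p.x       # VM_METHOD_TYPE_IVAR    -> vm_call_ivar
//     p.x = 1   # VM_METHOD_TYPE_ATTRSET -> vm_call_attrset
//
// Both cache a shape id and ivar index in the call cache so repeated access
// skips the generic instance variable lookup.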
4083
4084static inline VALUE
4085vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4086{
4087 rb_proc_t *proc;
4088 VALUE val;
4089 const struct rb_callcache *cc = calling->cc;
4090 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4091 VALUE procv = cme->def->body.bmethod.proc;
4092
4093 if (!RB_OBJ_SHAREABLE_P(procv) &&
4094 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4095 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4096 }
4097
4098 /* control block frame */
4099 GetProcPtr(procv, proc);
4100 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4101
4102 return val;
4103}
4104
4105static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4106
4107static VALUE
4108vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4109{
4110 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4111
4112 const struct rb_callcache *cc = calling->cc;
4113 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4114 VALUE procv = cme->def->body.bmethod.proc;
4115
4116 if (!RB_OBJ_SHAREABLE_P(procv) &&
4117 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4118 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4119 }
4120
4121 rb_proc_t *proc;
4122 GetProcPtr(procv, proc);
4123 const struct rb_block *block = &proc->block;
4124
4125 while (vm_block_type(block) == block_type_proc) {
4126 block = vm_proc_block(block->as.proc);
4127 }
4128 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4129
4130 const struct rb_captured_block *captured = &block->as.captured;
4131 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4132 VALUE * const argv = cfp->sp - calling->argc;
4133 const int arg_size = ISEQ_BODY(iseq)->param.size;
4134
4135 int opt_pc;
4136 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4137 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4138 }
4139 else {
4140 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4141 }
4142
4143 cfp->sp = argv - 1; // -1 for the receiver
4144
4145 vm_push_frame(ec, iseq,
4146 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4147 calling->recv,
4148 VM_GUARDED_PREV_EP(captured->ep),
4149 (VALUE)cme,
4150 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4151 argv + arg_size,
4152 ISEQ_BODY(iseq)->local_table_size - arg_size,
4153 ISEQ_BODY(iseq)->stack_max);
4154
4155 return Qundef;
4156}
4157
4158static VALUE
4159vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4160{
4161 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4162
4163 VALUE *argv;
4164 int argc;
4165 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4166 if (UNLIKELY(calling->heap_argv)) {
4167 argv = RARRAY_PTR(calling->heap_argv);
4168 cfp->sp -= 2;
4169 }
4170 else {
4171 argc = calling->argc;
4172 argv = ALLOCA_N(VALUE, argc);
4173 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4174 cfp->sp -= argc + 1;
4175 }
4176
4177 return vm_call_bmethod_body(ec, calling, argv);
4178}
4179
4180static VALUE
4181vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4182{
4183 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4184
4185 const struct rb_callcache *cc = calling->cc;
4186 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4187 VALUE procv = cme->def->body.bmethod.proc;
4188 rb_proc_t *proc;
4189 GetProcPtr(procv, proc);
4190 const struct rb_block *block = &proc->block;
4191
4192 while (vm_block_type(block) == block_type_proc) {
4193 block = vm_proc_block(block->as.proc);
4194 }
4195 if (vm_block_type(block) == block_type_iseq) {
4196 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4197 return vm_call_iseq_bmethod(ec, cfp, calling);
4198 }
4199
4200 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4201 return vm_call_noniseq_bmethod(ec, cfp, calling);
4202}
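
// A bmethod is a method whose body is a Proc, e.g. (illustrative):
//
//     define_method(:twice) { |x| x * 2 }
//
// When the underlying block is an iseq block, vm_call_iseq_bmethod pushes a
// BLOCK|BMETHOD|LAMBDA frame for it directly; otherwise vm_call_noniseq_bmethod
// materializes argv and invokes the proc through vm_call_bmethod_body.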
4203
4204VALUE
4205rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4206{
4207 VALUE klass = current_class;
4208
4209 /* for a prepended Module, start from the covering class */
4210 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4211 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4212 klass = RBASIC_CLASS(klass);
4213 }
4214
4215 while (RTEST(klass)) {
4216 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4217 if (owner == target_owner) {
4218 return klass;
4219 }
4220 klass = RCLASS_SUPER(klass);
4221 }
4222
4223 return current_class; /* maybe module function */
4224}
4225
4226static const rb_callable_method_entry_t *
4227aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4228{
4229 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4230 const rb_callable_method_entry_t *cme;
4231
4232 if (orig_me->defined_class == 0) {
4233 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4234 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4235 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4236
4237 if (me->def->reference_count == 1) {
4238 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4239 }
4240 else {
4241 rb_method_definition_t *def =
4242 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4243 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4244 }
4245 }
4246 else {
4247 cme = (const rb_callable_method_entry_t *)orig_me;
4248 }
4249
4250 VM_ASSERT(callable_method_entry_p(cme));
4251 return cme;
4252}
4253
4254const rb_callable_method_entry_t *
4255rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4256{
4257 return aliased_callable_method_entry(me);
4258}
4259
4260static VALUE
4261vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4262{
4263 calling->cc = &VM_CC_ON_STACK(Qundef,
4264 vm_call_general,
4265 {{0}},
4266 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4267
4268 return vm_call_method_each_type(ec, cfp, calling);
4269}
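
// Alias entries re-dispatch to the original definition, e.g. (illustrative):
//
//     class String
//       alias_method :size_in_chars, :length
//     end
//
// "abc".size_in_chars resolves the VM_METHOD_TYPE_ALIAS entry to the original
// cme via aliased_callable_method_entry() and dispatches on that instead.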
4270
4271static enum method_missing_reason
4272ci_missing_reason(const struct rb_callinfo *ci)
4273{
4274 enum method_missing_reason stat = MISSING_NOENTRY;
4275 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4276 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4277 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4278 return stat;
4279}
4280
4281static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4282
4283static VALUE
4284vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4285 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4286{
4287 ASSUME(calling->argc >= 0);
4288
4289 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4290 int argc = calling->argc;
4291 VALUE recv = calling->recv;
4292 VALUE klass = CLASS_OF(recv);
4293 ID mid = rb_check_id(&symbol);
4294 flags |= VM_CALL_OPT_SEND;
4295
4296 if (UNLIKELY(! mid)) {
4297 mid = idMethodMissing;
4298 missing_reason = ci_missing_reason(ci);
4299 ec->method_missing_reason = missing_reason;
4300
4301 VALUE argv_ary;
4302 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4303 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4304 rb_ary_unshift(argv_ary, symbol);
4305
4306 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4307 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4308 VALUE exc = rb_make_no_method_exception(
4309 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4310
4311 rb_exc_raise(exc);
4312 }
4313 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4314 }
4315 else {
4316 /* E.g. when argc == 2
4317 *
4318 * | | | | TOPN
4319 * | | +------+
4320 * | | +---> | arg1 | 0
4321 * +------+ | +------+
4322 * | arg1 | -+ +-> | arg0 | 1
4323 * +------+ | +------+
4324 * | arg0 | ---+ | sym | 2
4325 * +------+ +------+
4326 * | recv | | recv | 3
4327 * --+------+--------+------+------
4328 */
4329 int i = argc;
4330 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4331 INC_SP(1);
4332 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4333 argc = ++calling->argc;
4334
4335 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4336 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4337 TOPN(i) = symbol;
4338 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4339 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4340 VALUE exc = rb_make_no_method_exception(
4341 rb_eNoMethodError, 0, recv, argc, argv, priv);
4342
4343 rb_exc_raise(exc);
4344 }
4345 else {
4346 TOPN(i) = rb_str_intern(symbol);
4347 }
4348 }
4349 }
4350
4351 struct rb_forwarding_call_data new_fcd = {
4352 .cd = {
4353 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4354 .cc = NULL,
4355 },
4356 .caller_ci = NULL,
4357 };
4358
4359 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4360 calling->cd = &new_fcd.cd;
4361 }
4362 else {
4363 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4364 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4365 new_fcd.caller_ci = caller_ci;
4366 calling->cd = (struct rb_call_data *)&new_fcd;
4367 }
4368 calling->cc = &VM_CC_ON_STACK(klass,
4369 vm_call_general,
4370 { .method_missing_reason = missing_reason },
4371 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4372
4373 if (flags & VM_CALL_FCALL) {
4374 return vm_call_method(ec, reg_cfp, calling);
4375 }
4376
4377 const struct rb_callcache *cc = calling->cc;
4378 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4379
4380 if (vm_cc_cme(cc) != NULL) {
4381 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4382 case METHOD_VISI_PUBLIC: /* likely */
4383 return vm_call_method_each_type(ec, reg_cfp, calling);
4384 case METHOD_VISI_PRIVATE:
4385 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4386 break;
4387 case METHOD_VISI_PROTECTED:
4388 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4389 break;
4390 default:
4391 VM_UNREACHABLE(vm_call_method);
4392 }
4393 return vm_call_method_missing(ec, reg_cfp, calling);
4394 }
4395
4396 return vm_call_method_nome(ec, reg_cfp, calling);
4397}
4398
4399static VALUE
4400vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4401{
4402 const struct rb_callinfo *ci = calling->cd->ci;
4403 int i;
4404 VALUE sym;
4405
4406 i = calling->argc - 1;
4407
4408 if (calling->argc == 0) {
4409 rb_raise(rb_eArgError, "no method name given");
4410 }
4411
4412 sym = TOPN(i);
4413 /* E.g. when i == 2
4414 *
4415 * | | | | TOPN
4416 * +------+ | |
4417 * | arg1 | ---+ | | 0
4418 * +------+ | +------+
4419 * | arg0 | -+ +-> | arg1 | 1
4420 * +------+ | +------+
4421 * | sym | +---> | arg0 | 2
4422 * +------+ +------+
4423 * | recv | | recv | 3
4424 * --+------+--------+------+------
4425 */
4426 /* shift arguments */
4427 if (i > 0) {
4428 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4429 }
4430 calling->argc -= 1;
4431 DEC_SP(1);
4432
4433 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4434}
4435
4436static VALUE
4437vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4438{
4439 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4440 const struct rb_callinfo *ci = calling->cd->ci;
4441 int flags = VM_CALL_FCALL;
4442 VALUE sym;
4443
4444 VALUE argv_ary;
4445 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4446 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4447 sym = rb_ary_shift(argv_ary);
4448 flags |= VM_CALL_ARGS_SPLAT;
4449 if (calling->kw_splat) {
4450 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4451 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4452 calling->kw_splat = 0;
4453 }
4454 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4455 }
4456
4457 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4458 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4459}
4460
4461static VALUE
4462vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4463{
4464 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4465 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4466}
4467
4468static VALUE
4469vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4470{
4471 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4472
4473 const struct rb_callinfo *ci = calling->cd->ci;
4474 int flags = vm_ci_flag(ci);
4475
4476 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4477 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4478 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4479 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4480 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4481 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4482 }
4483
4484 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4485 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4486}
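
// send dispatch, illustrative shapes (not from this file):
//
//     obj.send(:name, 1, 2)   # vm_call_opt_send_simple: the method name is
//                             # shifted off the stack and re-dispatched
//     obj.send(:name, *args)  # vm_call_opt_send_complex: splat/kwsplat/kwarg
//                             # forms are normalized by CALLER_SETUP_ARG first
//
// A name with no existing ID (e.g. a never-interned String) takes the
// method_missing branch in vm_call_symbol without creating a new Symbol.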
4487
4488static VALUE
4489vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4490 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4491{
4492 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4493
4494 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4495 unsigned int argc, flag;
4496
4497 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4498 argc = ++calling->argc;
4499
4500 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4501 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4502 vm_check_canary(ec, reg_cfp->sp);
4503 if (argc > 1) {
4504 MEMMOVE(argv+1, argv, VALUE, argc-1);
4505 }
4506 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4507 INC_SP(1);
4508
4509 ec->method_missing_reason = reason;
4510
4511 struct rb_forwarding_call_data new_fcd = {
4512 .cd = {
4513 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4514 .cc = NULL,
4515 },
4516 .caller_ci = NULL,
4517 };
4518
4519 if (!(flag & VM_CALL_FORWARDING)) {
4520 calling->cd = &new_fcd.cd;
4521 }
4522 else {
4523 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4524 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4525 new_fcd.caller_ci = caller_ci;
4526 calling->cd = (struct rb_call_data *)&new_fcd;
4527 }
4528
4529 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4530 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4531 return vm_call_method(ec, reg_cfp, calling);
4532}
4533
4534static VALUE
4535vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4536{
4537 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4538}
4539
4540static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4541static VALUE
4542vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4543{
4544 klass = RCLASS_SUPER(klass);
4545
4546 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4547 if (cme == NULL) {
4548 return vm_call_method_nome(ec, cfp, calling);
4549 }
4550 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4551 cme->def->body.refined.orig_me) {
4552 cme = refined_method_callable_without_refinement(cme);
4553 }
4554
4555 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4556
4557 return vm_call_method_each_type(ec, cfp, calling);
4558}
4559
4560static inline VALUE
4561find_refinement(VALUE refinements, VALUE klass)
4562{
4563 if (NIL_P(refinements)) {
4564 return Qnil;
4565 }
4566 return rb_hash_lookup(refinements, klass);
4567}
4568
4569PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4570static rb_control_frame_t *
4571current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4572{
4573 rb_control_frame_t *top_cfp = cfp;
4574
4575 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4576 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4577
4578 do {
4579 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4580 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4581 /* TODO: orphan block */
4582 return top_cfp;
4583 }
4584 } while (cfp->iseq != local_iseq);
4585 }
4586 return cfp;
4587}
4588
4589static const rb_callable_method_entry_t *
4590refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4591{
4592 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4593 const rb_callable_method_entry_t *cme;
4594
4595 if (orig_me->defined_class == 0) {
4596 cme = NULL;
4597 rb_notimplement();
4598 }
4599 else {
4600 cme = (const rb_callable_method_entry_t *)orig_me;
4601 }
4602
4603 VM_ASSERT(callable_method_entry_p(cme));
4604
4605 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4606 cme = NULL;
4607 }
4608
4609 return cme;
4610}
4611
4612static const rb_callable_method_entry_t *
4613search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4614{
4615 ID mid = vm_ci_mid(calling->cd->ci);
4616 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4617 const struct rb_callcache * const cc = calling->cc;
4618 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4619
4620 for (; cref; cref = CREF_NEXT(cref)) {
4621 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4622 if (NIL_P(refinement)) continue;
4623
4624 const rb_callable_method_entry_t *const ref_me =
4625 rb_callable_method_entry(refinement, mid);
4626
4627 if (ref_me) {
4628 if (vm_cc_call(cc) == vm_call_super_method) {
4629 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4630 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4631 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4632 continue;
4633 }
4634 }
4635
4636 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4637 cme->def != ref_me->def) {
4638 cme = ref_me;
4639 }
4640 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4641 return cme;
4642 }
4643 }
4644 else {
4645 return NULL;
4646 }
4647 }
4648
4649 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4650 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4651 }
4652 else {
4653 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4654 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4655 return cme;
4656 }
4657}
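
// Refinement lookup, illustrative (not from this file):
//
//     module Shout
//       refine String do
//         def shout; upcase + "!"; end
//       end
//     end
//     using Shout     # records Shout in the current cref's refinements
//     "hi".shout      # find_refinement() locates the refinement of String,
//                     # whose method entry is used instead of the refined one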
4658
4659static VALUE
4660vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4661{
4662 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4663
4664 if (ref_cme) {
4665 if (calling->cd->cc) {
4666 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4667 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4668 return vm_call_method(ec, cfp, calling);
4669 }
4670 else {
4671 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4672 calling->cc = ref_cc;
4673 return vm_call_method(ec, cfp, calling);
4674 }
4675 }
4676 else {
4677 return vm_call_method_nome(ec, cfp, calling);
4678 }
4679}
4680
4681static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4682
4683NOINLINE(static VALUE
4684 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4685 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4686
4687static VALUE
4688vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4689 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4690{
4691 int argc = calling->argc;
4692
4693 /* remove self */
4694 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4695 DEC_SP(1);
4696
4697 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4698}
4699
4700static VALUE
4701vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4702{
4703 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4704
4705 const struct rb_callinfo *ci = calling->cd->ci;
4706 VALUE procval = calling->recv;
4707 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4708}
4709
4710static VALUE
4711vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4712{
4713 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4714
4715 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4716 const struct rb_callinfo *ci = calling->cd->ci;
4717
4718 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4719 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4720 }
4721 else {
4722 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4723 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4724 return vm_call_general(ec, reg_cfp, calling);
4725 }
4726}
4727
4728static VALUE
4729vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4730{
4731 VALUE recv = calling->recv;
4732
4733 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4734 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4735 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4736
4737 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4738 return internal_RSTRUCT_GET(recv, off);
4739}
4740
4741static VALUE
4742vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4743{
4744 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4745
4746 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4747 reg_cfp->sp -= 1;
4748 return ret;
4749}
4750
4751static VALUE
4752vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4753{
4754 VALUE recv = calling->recv;
4755
4756 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4757 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4758 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4759
4760 rb_check_frozen(recv);
4761
4762 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4763 internal_RSTRUCT_SET(recv, off, val);
4764
4765 return val;
4766}
4767
4768static VALUE
4769vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4770{
4771 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4772
4773 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4774 reg_cfp->sp -= 2;
4775 return ret;
4776}
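
// Struct accessor fastpaths, illustrative:
//
//     Point = Struct.new(:x, :y)
//     p = Point.new(1, 2)
//     p.x       # OPTIMIZED_METHOD_TYPE_STRUCT_AREF -> RSTRUCT_GET(p, 0)
//     p.y = 3   # OPTIMIZED_METHOD_TYPE_STRUCT_ASET -> RSTRUCT_SET(p, 1, 3)
//
// The member index lives in the optimized method definition, so no per-call
// member name lookup is needed.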
4777
4778NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4779 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4780
4781#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4782 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4783 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4784 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4785 var = func; \
4786 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4787 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4788 } \
4789 else { \
4790 nohook; \
4791 var = func; \
4792 }
4793
4794static VALUE
4795vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4796 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4797{
4798 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4799 case OPTIMIZED_METHOD_TYPE_SEND:
4800 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4801 return vm_call_opt_send(ec, cfp, calling);
4802 case OPTIMIZED_METHOD_TYPE_CALL:
4803 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4804 return vm_call_opt_call(ec, cfp, calling);
4805 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4806 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4807 return vm_call_opt_block_call(ec, cfp, calling);
4808 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4809 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4810 rb_check_arity(calling->argc, 0, 0);
4811
4812 VALUE v;
4813 VM_CALL_METHOD_ATTR(v,
4814 vm_call_opt_struct_aref(ec, cfp, calling),
4815 set_vm_cc_ivar(cc); \
4816 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4817 return v;
4818 }
4819 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4820 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4821 rb_check_arity(calling->argc, 1, 1);
4822
4823 VALUE v;
4824 VM_CALL_METHOD_ATTR(v,
4825 vm_call_opt_struct_aset(ec, cfp, calling),
4826 set_vm_cc_ivar(cc); \
4827 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4828 return v;
4829 }
4830 default:
4831 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4832 }
4833}
4834
4835static VALUE
4836vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4837{
4838 const struct rb_callinfo *ci = calling->cd->ci;
4839 const struct rb_callcache *cc = calling->cc;
4840 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4841 VALUE v;
4842
4843 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4844
4845 switch (cme->def->type) {
4846 case VM_METHOD_TYPE_ISEQ:
4847 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4848 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4849 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4850 }
4851 else {
4852 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4853 return vm_call_iseq_setup(ec, cfp, calling);
4854 }
4855
4856 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4857 case VM_METHOD_TYPE_CFUNC:
4858 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4859 return vm_call_cfunc(ec, cfp, calling);
4860
4861 case VM_METHOD_TYPE_ATTRSET:
4862 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4863
4864 rb_check_arity(calling->argc, 1, 1);
4865
4866 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4867
4868 if (vm_cc_markable(cc)) {
4869 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4870 VM_CALL_METHOD_ATTR(v,
4871 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4872 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4873 }
4874 else {
4875 cc = &((struct rb_callcache) {
4876 .flags = T_IMEMO |
4877 (imemo_callcache << FL_USHIFT) |
4878 VM_CALLCACHE_UNMARKABLE |
4879 VM_CALLCACHE_ON_STACK,
4880 .klass = cc->klass,
4881 .cme_ = cc->cme_,
4882 .call_ = cc->call_,
4883 .aux_ = {
4884 .attr = {
4885 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4886 }
4887 },
4888 });
4889
4890 VM_CALL_METHOD_ATTR(v,
4891 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4892 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4893 }
4894 return v;
4895
4896 case VM_METHOD_TYPE_IVAR:
4897 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4898 rb_check_arity(calling->argc, 0, 0);
4899 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4900 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4901 VM_CALL_METHOD_ATTR(v,
4902 vm_call_ivar(ec, cfp, calling),
4903 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4904 return v;
4905
4906 case VM_METHOD_TYPE_MISSING:
4907 vm_cc_method_missing_reason_set(cc, 0);
4908 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4909 return vm_call_method_missing(ec, cfp, calling);
4910
4911 case VM_METHOD_TYPE_BMETHOD:
4912 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4913 return vm_call_bmethod(ec, cfp, calling);
4914
4915 case VM_METHOD_TYPE_ALIAS:
4916 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4917 return vm_call_alias(ec, cfp, calling);
4918
4919 case VM_METHOD_TYPE_OPTIMIZED:
4920 return vm_call_optimized(ec, cfp, calling, ci, cc);
4921
4922 case VM_METHOD_TYPE_UNDEF:
4923 break;
4924
4925 case VM_METHOD_TYPE_ZSUPER:
4926 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4927
4928 case VM_METHOD_TYPE_REFINED:
4929 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4930 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4931 return vm_call_refined(ec, cfp, calling);
4932 }
4933
4934 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4935}
4936
4937NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4938
4939static VALUE
4940vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4941{
4942 /* method missing */
4943 const struct rb_callinfo *ci = calling->cd->ci;
4944 const int stat = ci_missing_reason(ci);
4945
4946 if (vm_ci_mid(ci) == idMethodMissing) {
4947 if (UNLIKELY(calling->heap_argv)) {
4948 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4949 }
4950 else {
4951 rb_control_frame_t *reg_cfp = cfp;
4952 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4953 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4954 }
4955 }
4956 else {
4957 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4958 }
4959}
4960
4961/* Protected method calls and super invocations need to check that the receiver
4962 * (self for super) inherits the module on which the method is defined.
4963 * In the case of refinements, the check should consider the original class,
4964 * not the refinement.
4965 */
4966static VALUE
4967vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4968{
4969 VALUE defined_class = me->defined_class;
4970 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4971 return NIL_P(refined_class) ? defined_class : refined_class;
4972}
4973
4974static inline VALUE
4975vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4976{
4977 const struct rb_callinfo *ci = calling->cd->ci;
4978 const struct rb_callcache *cc = calling->cc;
4979
4980 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4981
4982 if (vm_cc_cme(cc) != NULL) {
4983 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4984 case METHOD_VISI_PUBLIC: /* likely */
4985 return vm_call_method_each_type(ec, cfp, calling);
4986
4987 case METHOD_VISI_PRIVATE:
4988 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4989 enum method_missing_reason stat = MISSING_PRIVATE;
4990 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4991
4992 vm_cc_method_missing_reason_set(cc, stat);
4993 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4994 return vm_call_method_missing(ec, cfp, calling);
4995 }
4996 return vm_call_method_each_type(ec, cfp, calling);
4997
4998 case METHOD_VISI_PROTECTED:
4999 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
5000 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
5001 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
5002 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
5003 return vm_call_method_missing(ec, cfp, calling);
5004 }
5005 else {
5006 /* caching method info to dummy cc */
5007 VM_ASSERT(vm_cc_cme(cc) != NULL);
5008 struct rb_callcache cc_on_stack = *cc;
5009 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
5010 calling->cc = &cc_on_stack;
5011 return vm_call_method_each_type(ec, cfp, calling);
5012 }
5013 }
5014 return vm_call_method_each_type(ec, cfp, calling);
5015
5016 default:
5017 rb_bug("unreachable");
5018 }
5019 }
5020 else {
5021 return vm_call_method_nome(ec, cfp, calling);
5022 }
5023}
5024
5025static VALUE
5026vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5027{
5028 RB_DEBUG_COUNTER_INC(ccf_general);
5029 return vm_call_method(ec, reg_cfp, calling);
5030}
5031
5032void
5033rb_vm_cc_general(const struct rb_callcache *cc)
5034{
5035 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
5036 VM_ASSERT(cc != vm_cc_empty());
5037
5038 *(vm_call_handler *)&cc->call_ = vm_call_general;
5039}
5040
5041static VALUE
5042vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5043{
5044 RB_DEBUG_COUNTER_INC(ccf_super_method);
5045
5046 // This check is introduced to keep this function distinct from `vm_call_general`: some compilers
5047 // (MSVC, in our experience) can merge identical functions so that both end up with the same address.
5048 // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
5049 if (ec == NULL) rb_bug("unreachable");
5050
5051 /* this check is required to distinguish this function from the others. */
5052 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
5053 return vm_call_method(ec, reg_cfp, calling);
5054}
5055
5056/* super */
5057
5058static inline VALUE
5059vm_search_normal_superclass(VALUE klass)
5060{
5061 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5062 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5063 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5064 klass = RBASIC(klass)->klass;
5065 }
5066 klass = RCLASS_ORIGIN(klass);
5067 return RCLASS_SUPER(klass);
5068}
5069
5070NORETURN(static void vm_super_outside(void));
5071
5072static void
5073vm_super_outside(void)
5074{
5075 rb_raise(rb_eNoMethodError, "super called outside of method");
5076}
5077
5078static const struct rb_callcache *
5079empty_cc_for_super(void)
5080{
5081 return &vm_empty_cc_for_super;
5082}
5083
5084static const struct rb_callcache *
5085vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5086{
5087 VALUE current_defined_class;
5088 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5089
5090 if (!me) {
5091 vm_super_outside();
5092 }
5093
5094 current_defined_class = vm_defined_class_for_protected_call(me);
5095
5096 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5097 reg_cfp->iseq != method_entry_iseqptr(me) &&
5098 !rb_obj_is_kind_of(recv, current_defined_class)) {
5099 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5100 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5101
5102 if (m) { /* not bound UnboundMethod */
5103 rb_raise(rb_eTypeError,
5104 "self has wrong type to call super in this context: "
5105 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5106 rb_obj_class(recv), m);
5107 }
5108 }
5109
5110 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5111 rb_raise(rb_eRuntimeError,
5112 "implicit argument passing of super from method defined"
5113 " by define_method() is not supported."
5114 " Specify all arguments explicitly.");
5115 }
5116
5117 ID mid = me->def->original_id;
5118
5119 if (!vm_ci_markable(cd->ci)) {
5120 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5121 }
5122 else {
5123 // update iseq. really? (TODO)
5124 cd->ci = vm_ci_new_runtime(mid,
5125 vm_ci_flag(cd->ci),
5126 vm_ci_argc(cd->ci),
5127 vm_ci_kwarg(cd->ci));
5128
5129 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5130 }
5131
5132 const struct rb_callcache *cc;
5133
5134 VALUE klass = vm_search_normal_superclass(me->defined_class);
5135
5136 if (!klass) {
5137 /* bound instance method of module */
5138 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5139 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5140 }
5141 else {
5142 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5143 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5144
5145 // with define_method, the cache can hold an entry for a different method id
5146 if (cached_cme == NULL) {
5147 // empty_cc_for_super is not a markable object
5148 cd->cc = empty_cc_for_super();
5149 }
5150 else if (cached_cme->called_id != mid) {
5151 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5152 if (cme) {
5153 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5154 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5155 }
5156 else {
5157 cd->cc = cc = empty_cc_for_super();
5158 }
5159 }
5160 else {
5161 switch (cached_cme->def->type) {
5162 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5163 case VM_METHOD_TYPE_REFINED:
5164 // cc->klass is a superclass of the receiver's class, so checking cc->klass alone is not enough to invalidate the IVC for the receiver's class.
5165 case VM_METHOD_TYPE_ATTRSET:
5166 case VM_METHOD_TYPE_IVAR:
5167 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5168 break;
5169 default:
5170 break; // use fastpath
5171 }
5172 }
5173 }
5174
5175 VM_ASSERT((vm_cc_cme(cc), true));
5176
5177 return cc;
5178}
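
// Illustrative super dispatch (not from this file):
//
//     class Child < Parent
//       def greet; super; end
//     end
//
// The search starts from the *defined class* of the currently running method
// (Child here, or its origin when modules are prepended), not from the runtime
// class of self, so nested super calls walk each ancestor exactly once.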
5179
5180/* yield */
5181
5182static inline int
5183block_proc_is_lambda(const VALUE procval)
5184{
5185 rb_proc_t *proc;
5186
5187 if (procval) {
5188 GetProcPtr(procval, proc);
5189 return proc->is_lambda;
5190 }
5191 else {
5192 return 0;
5193 }
5194}
5195
5196static inline const rb_namespace_t *
5197block_proc_namespace(const VALUE procval)
5198{
5199 rb_proc_t *proc;
5200
5201 if (procval) {
5202 GetProcPtr(procval, proc);
5203 return proc->ns;
5204 }
5205 else {
5206 return NULL;
5207 }
5208}
5209
5210static VALUE
5211vm_yield_with_cfunc(rb_execution_context_t *ec,
5212 const struct rb_captured_block *captured,
5213 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5214 const rb_callable_method_entry_t *me)
5215{
5216 int is_lambda = FALSE; /* TODO */
5217 VALUE val, arg, blockarg;
5218 int frame_flag;
5219 const struct vm_ifunc *ifunc = captured->code.ifunc;
5220
5221 if (is_lambda) {
5222 arg = rb_ary_new4(argc, argv);
5223 }
5224 else if (argc == 0) {
5225 arg = Qnil;
5226 }
5227 else {
5228 arg = argv[0];
5229 }
5230
5231 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5232
5233 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5234 if (kw_splat) {
5235 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5236 }
5237
5238 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5239 frame_flag,
5240 self,
5241 VM_GUARDED_PREV_EP(captured->ep),
5242 (VALUE)me,
5243 0, ec->cfp->sp, 0, 0);
5244 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5245 rb_vm_pop_frame(ec);
5246
5247 return val;
5248}
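
// For ifunc blocks, the first yielded value (or nil when none) is passed as
// `arg`, while the full argc/argv pair is still handed over as well. This is
// the calling convention seen by C iterator blocks registered via
// rb_block_call(): (yielded_arg, callback_arg, argc, argv, blockarg).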
5249
5250VALUE
5251rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5252{
5253 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5254}
5255
5256static VALUE
5257vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5258{
5259 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5260}
5261
5262static inline int
5263vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5264{
5265 int i;
5266 long len = RARRAY_LEN(ary);
5267
5268 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5269
5270 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5271 argv[i] = RARRAY_AREF(ary, i);
5272 }
5273
5274 return i;
5275}
5276
5277static inline VALUE
5278vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5279{
5280 VALUE ary, arg0 = argv[0];
5281 ary = rb_check_array_type(arg0);
5282#if 0
5283 argv[0] = arg0;
5284#else
5285 VM_ASSERT(argv[0] == arg0);
5286#endif
5287 return ary;
5288}
5289
5290static int
5291vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5292{
5293 if (rb_simple_iseq_p(iseq)) {
5294 rb_control_frame_t *cfp = ec->cfp;
5295 VALUE arg0;
5296
5297 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5298
5299 if (arg_setup_type == arg_setup_block &&
5300 calling->argc == 1 &&
5301 ISEQ_BODY(iseq)->param.flags.has_lead &&
5302 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5303 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5304 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5305 }
5306
5307 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5308 if (arg_setup_type == arg_setup_block) {
5309 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5310 int i;
5311 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5312 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5313 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5314 }
5315 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5316 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5317 }
5318 }
5319 else {
5320 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5321 }
5322 }
5323
5324 return 0;
5325 }
5326 else {
5327 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5328 }
5329}
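
// Block argument adjustment for simple iseqs, illustrative:
//
//     [[1, 2]].each    { |a, b| }  # arg0 Array auto-splats: a = 1, b = 2
//     [[1, 2, 3]].each { |a, b| }  # surplus values are dropped: b = 2
//     [1].each         { |a, b| }  # missing params are filled: b = nil
//
// Lambdas go through arg_setup_method instead and raise ArgumentError on an
// arity mismatch rather than adjusting the arguments.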
5330
5331static int
5332vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5333{
5334 struct rb_calling_info calling_entry, *calling;
5335
5336 calling = &calling_entry;
5337 calling->argc = argc;
5338 calling->block_handler = block_handler;
5339 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5340 calling->recv = Qundef;
5341 calling->heap_argv = 0;
5342 calling->cc = NULL;
5343 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5344
5345 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5346}
5347
5348/* ruby iseq -> ruby block */
5349
5350static VALUE
5351vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5352 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5353 bool is_lambda, VALUE block_handler)
5354{
5355 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5356 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5357 const int arg_size = ISEQ_BODY(iseq)->param.size;
5358 VALUE * const rsp = GET_SP() - calling->argc;
5359 VALUE * const argv = rsp;
5360 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5361 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5362
5363 SET_SP(rsp);
5364
5365 if (calling->proc_ns) {
5366 frame_flag |= VM_FRAME_FLAG_NS_SWITCH;
5367 }
5368
5369 vm_push_frame(ec, iseq,
5370 frame_flag,
5371 captured->self,
5372 VM_GUARDED_PREV_EP(captured->ep), 0,
5373 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5374 rsp + arg_size,
5375 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5376
5377 return Qundef;
5378}
5379
5380static VALUE
5381vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5382 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5383 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5384{
5385 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5386 int flags = vm_ci_flag(ci);
5387
5388 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5389 ((calling->argc == 0) ||
5390 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5391 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5392 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5393 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5394 flags = 0;
5395 if (UNLIKELY(calling->heap_argv)) {
5396#if VM_ARGC_STACK_MAX < 0
5397 if (RARRAY_LEN(calling->heap_argv) < 1) {
5398 rb_raise(rb_eArgError, "no receiver given");
5399 }
5400#endif
5401 calling->recv = rb_ary_shift(calling->heap_argv);
5402 // Modify stack to avoid cfp consistency error
5403 reg_cfp->sp++;
5404 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5405 reg_cfp->sp[-2] = calling->recv;
5406 flags |= VM_CALL_ARGS_SPLAT;
5407 }
5408 else {
5409 if (calling->argc < 1) {
5410 rb_raise(rb_eArgError, "no receiver given");
5411 }
5412 calling->recv = TOPN(--calling->argc);
5413 }
5414 if (calling->kw_splat) {
5415 flags |= VM_CALL_KW_SPLAT;
5416 }
5417 }
5418 else {
5419 if (calling->argc < 1) {
5420 rb_raise(rb_eArgError, "no receiver given");
5421 }
5422 calling->recv = TOPN(--calling->argc);
5423 }
5424
5425 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5426}
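
// Symbol block handlers, illustrative:
//
//     %w[a b].map(&:upcase)
//
// The first yielded value becomes the receiver and any remaining values become
// arguments, i.e. the block behaves like { |recv, *args| recv.upcase(*args) };
// when nothing is yielded, "no receiver given" (ArgumentError) is raised.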
5427
5428static VALUE
5429vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5430 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5431 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5432{
5433 VALUE val;
5434 int argc;
5435 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5436 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5437 argc = calling->argc;
5438 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5439 POPN(argc); /* TODO: should put before C/yield? */
5440 return val;
5441}
5442
5443static VALUE
5444vm_proc_to_block_handler(VALUE procval)
5445{
5446 const struct rb_block *block = vm_proc_block(procval);
5447
5448 switch (vm_block_type(block)) {
5449 case block_type_iseq:
5450 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5451 case block_type_ifunc:
5452 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5453 case block_type_symbol:
5454 return VM_BH_FROM_SYMBOL(block->as.symbol);
5455 case block_type_proc:
5456 return VM_BH_FROM_PROC(block->as.proc);
5457 }
5458 VM_UNREACHABLE(vm_yield_with_proc);
5459 return Qundef;
5460}
5461
5462static VALUE
5463vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5464 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5465 bool is_lambda, VALUE block_handler)
5466{
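    /* A proc-type block handler may wrap yet another proc, so unwrap in a
     * loop until an iseq/ifunc/symbol handler remains. The lambda flag is
     * taken from the innermost proc; the namespace from the outermost proc
     * that carries one. */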
5467 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5468 VALUE proc = VM_BH_TO_PROC(block_handler);
5469 if (!calling->proc_ns) {
5470 calling->proc_ns = block_proc_namespace(proc);
5471 }
5472 is_lambda = block_proc_is_lambda(proc);
5473 block_handler = vm_proc_to_block_handler(proc);
5474 }
5475
5476 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5477}
5478
5479static inline VALUE
5480vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5481 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5482 bool is_lambda, VALUE block_handler)
5483{
5484 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5485 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5486 bool is_lambda, VALUE block_handler);
5487
5488 switch (vm_block_handler_type(block_handler)) {
5489 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5490 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5491 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5492 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5493 default: rb_bug("vm_invoke_block: unreachable");
5494 }
5495
5496 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5497}
5498
5499static VALUE
5500vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5501{
5502 const rb_execution_context_t *ec = GET_EC();
5503 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5504 struct rb_captured_block *captured;
5505
5506 if (cfp == 0) {
5507 rb_bug("vm_make_proc_with_iseq: unreachable");
5508 }
5509
5510 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5511 captured->code.iseq = blockiseq;
5512
5513 return rb_vm_make_proc(ec, captured, rb_cProc);
5514}
5515
5516static VALUE
5517vm_once_exec(VALUE iseq)
5518{
5519 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5520 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5521}
5522
5523static VALUE
5524vm_once_clear(VALUE data)
5525{
5526 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5527 is->once.running_thread = NULL;
5528 return Qnil;
5529}
5530
5531/* defined insn */
5532
5533static bool
5534check_respond_to_missing(VALUE obj, VALUE v)
5535{
5536 VALUE args[2];
5537 VALUE r;
5538
5539 args[0] = obj; args[1] = Qfalse;
5540 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5541 if (!UNDEF_P(r) && RTEST(r)) {
5542 return true;
5543 }
5544 else {
5545 return false;
5546 }
5547}
5548
5549static bool
5550vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5551{
5552 VALUE klass;
5553 enum defined_type type = (enum defined_type)op_type;
5554
5555 switch (type) {
5556 case DEFINED_IVAR:
5557 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5558 break;
5559 case DEFINED_GVAR:
5560 return rb_gvar_defined(SYM2ID(obj));
5561 break;
5562 case DEFINED_CVAR: {
5563 const rb_cref_t *cref = vm_get_cref(GET_EP());
5564 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5565 return rb_cvar_defined(klass, SYM2ID(obj));
5566 break;
5567 }
5568 case DEFINED_CONST:
5569 case DEFINED_CONST_FROM: {
5570 bool allow_nil = type == DEFINED_CONST;
5571 klass = v;
5572 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5573 break;
5574 }
5575 case DEFINED_FUNC:
5576 klass = CLASS_OF(v);
5577 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5578 break;
5579 case DEFINED_METHOD:{
5580 VALUE klass = CLASS_OF(v);
5581 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5582
5583 if (me) {
5584 switch (METHOD_ENTRY_VISI(me)) {
5585 case METHOD_VISI_PRIVATE:
5586 break;
5587 case METHOD_VISI_PROTECTED:
5588 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5589 break;
5590 }
5591 case METHOD_VISI_PUBLIC:
5592 return true;
5593 break;
5594 default:
5595 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5596 }
5597 }
5598 else {
5599 return check_respond_to_missing(obj, v);
5600 }
5601 break;
5602 }
5603 case DEFINED_YIELD:
5604 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5605 return true;
5606 }
5607 break;
5608 case DEFINED_ZSUPER:
5609 {
5610 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5611
5612 if (me) {
5613 VALUE klass = vm_search_normal_superclass(me->defined_class);
5614 if (!klass) return false;
5615
5616 ID id = me->def->original_id;
5617
5618 return rb_method_boundp(klass, id, 0);
5619 }
5620 }
5621 break;
5622 case DEFINED_REF:
5623 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5624 default:
5625 rb_bug("unimplemented defined? type (VM)");
5626 break;
5627 }
5628
5629 return false;
5630}
5631
5632bool
5633rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5634{
5635 return vm_defined(ec, reg_cfp, op_type, obj, v);
5636}
5637
5638static const VALUE *
5639vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5640{
5641 rb_num_t i;
5642 const VALUE *ep = reg_ep;
5643 for (i = 0; i < lv; i++) {
5644 ep = GET_PREV_EP(ep);
5645 }
5646 return ep;
5647}
5648
5649static VALUE
5650vm_get_special_object(const VALUE *const reg_ep,
5651 enum vm_special_object_type type)
5652{
5653 switch (type) {
5654 case VM_SPECIAL_OBJECT_VMCORE:
5655 return rb_mRubyVMFrozenCore;
5656 case VM_SPECIAL_OBJECT_CBASE:
5657 return vm_get_cbase(reg_ep);
5658 case VM_SPECIAL_OBJECT_CONST_BASE:
5659 return vm_get_const_base(reg_ep);
5660 default:
5661 rb_bug("putspecialobject insn: unknown value_type %d", type);
5662 }
5663}
5664
5665// The ZJIT implementation reuses this C function
5666// and therefore needs a non-static symbol to call
5667VALUE
5668rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5669{
5670 return vm_get_special_object(reg_ep, type);
5671}
5672
5673static VALUE
5674vm_concat_array(VALUE ary1, VALUE ary2st)
5675{
5676 const VALUE ary2 = ary2st;
5677 VALUE tmp1 = rb_check_to_array(ary1);
5678 VALUE tmp2 = rb_check_to_array(ary2);
5679
5680 if (NIL_P(tmp1)) {
5681 tmp1 = rb_ary_new3(1, ary1);
5682 }
5683 if (tmp1 == ary1) {
5684 tmp1 = rb_ary_dup(ary1);
5685 }
5686
5687 if (NIL_P(tmp2)) {
5688 return rb_ary_push(tmp1, ary2);
5689 }
5690 else {
5691 return rb_ary_concat(tmp1, tmp2);
5692 }
5693}
5694
5695static VALUE
5696vm_concat_to_array(VALUE ary1, VALUE ary2st)
5697{
5698 /* ary1 must be a newly created array */
5699 const VALUE ary2 = ary2st;
5700
5701 if (NIL_P(ary2)) return ary1;
5702
5703 VALUE tmp2 = rb_check_to_array(ary2);
5704
5705 if (NIL_P(tmp2)) {
5706 return rb_ary_push(ary1, ary2);
5707 }
5708 else {
5709 return rb_ary_concat(ary1, tmp2);
5710 }
5711}
5712
5713// The YJIT implementation reuses this C function
5714// and therefore needs a non-static symbol to call
5715VALUE
5716rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5717{
5718 return vm_concat_array(ary1, ary2st);
5719}
5720
5721VALUE
5722rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5723{
5724 return vm_concat_to_array(ary1, ary2st);
5725}
5726
5727static VALUE
5728vm_splat_array(VALUE flag, VALUE ary)
5729{
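    /* Backs the splatarray insn: nil splats to an empty array (a shared
     * frozen one when `flag` is false), objects without to_a are wrapped in
     * a one-element array, and a true `flag` (result may be mutated) forces
     * a defensive dup. */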
5730 if (NIL_P(ary)) {
5731 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5732 }
5733 VALUE tmp = rb_check_to_array(ary);
5734 if (NIL_P(tmp)) {
5735 return rb_ary_new3(1, ary);
5736 }
5737 else if (RTEST(flag)) {
5738 return rb_ary_dup(tmp);
5739 }
5740 else {
5741 return tmp;
5742 }
5743}
5744
5745// The YJIT implementation reuses this C function
5746// and therefore needs a non-static symbol to call
5747VALUE
5748rb_vm_splat_array(VALUE flag, VALUE ary)
5749{
5750 return vm_splat_array(flag, ary);
5751}
5752
5753static VALUE
5754vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5755{
5756 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5757
5758 if (flag & VM_CHECKMATCH_ARRAY) {
5759 long i;
5760 const long n = RARRAY_LEN(pattern);
5761
5762 for (i = 0; i < n; i++) {
5763 VALUE v = RARRAY_AREF(pattern, i);
5764 VALUE c = check_match(ec, v, target, type);
5765
5766 if (RTEST(c)) {
5767 return c;
5768 }
5769 }
5770 return Qfalse;
5771 }
5772 else {
5773 return check_match(ec, pattern, target, type);
5774 }
5775}
5776
5777VALUE
5778rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5779{
5780 return vm_check_match(ec, target, pattern, flag);
5781}
5782
5783static VALUE
5784vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5785{
5786 const VALUE kw_bits = *(ep - bits);
5787
5788 if (FIXNUM_P(kw_bits)) {
5789 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5790 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5791 return Qfalse;
5792 }
5793 else {
5794 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5795 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5796 }
5797 return Qtrue;
5798}
5799
5800static void
5801vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5802{
5803 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5804 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5805 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5806 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5807
5808 switch (flag) {
5809 case RUBY_EVENT_CALL:
5810 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5811 return;
5812 case RUBY_EVENT_C_CALL:
5813 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5814 return;
5815 case RUBY_EVENT_RETURN:
5816 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5817 return;
5818 case RUBY_EVENT_C_RETURN:
5819 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5820 return;
5821 }
5822 }
5823}
5824
5825static VALUE
5826vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5827{
5828 if (!rb_const_defined_at(cbase, id)) {
5829 return 0;
5830 }
5831 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5832 return rb_public_const_get_at(cbase, id);
5833 }
5834 else {
5835 return rb_const_get_at(cbase, id);
5836 }
5837}
5838
5839static VALUE
5840vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5841{
5842 if (!RB_TYPE_P(klass, T_CLASS)) {
5843 return 0;
5844 }
5845 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5846 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5847
5848 if (tmp != super) {
5849 rb_raise(rb_eTypeError,
5850 "superclass mismatch for class %"PRIsVALUE"",
5851 rb_id2str(id));
5852 }
5853 else {
5854 return klass;
5855 }
5856 }
5857 else {
5858 return klass;
5859 }
5860}
5861
5862static VALUE
5863vm_check_if_module(ID id, VALUE mod)
5864{
5865 if (!RB_TYPE_P(mod, T_MODULE)) {
5866 return 0;
5867 }
5868 else {
5869 return mod;
5870 }
5871}
5872
5873static VALUE
5874declare_under(ID id, VALUE cbase, VALUE c)
5875{
5876 rb_set_class_path_string(c, cbase, rb_id2str(id));
5877 rb_const_set(cbase, id, c);
5878 return c;
5879}
5880
5881static VALUE
5882vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5883{
5884 /* new class declaration */
5885 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5886 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5887 rb_define_alloc_func(c, rb_get_alloc_func(c));
5888 rb_class_inherited(s, c);
5889 return c;
5890}
5891
5892static VALUE
5893vm_declare_module(ID id, VALUE cbase)
5894{
5895 /* new module declaration */
5896 return declare_under(id, cbase, rb_module_new());
5897}
5898
5899NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5900static void
5901unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5902{
5903 VALUE name = rb_id2str(id);
5904 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5905 name, type);
5906 VALUE location = rb_const_source_location_at(cbase, id);
5907 if (!NIL_P(location)) {
5908 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5909 " previous definition of %"PRIsVALUE" was here",
5910 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5911 }
5912 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5913}
5914
5915static VALUE
5916vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5917{
5918 VALUE klass;
5919
5920 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5921 rb_raise(rb_eTypeError,
5922 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5923 rb_obj_class(super));
5924 }
5925
5926 vm_check_if_namespace(cbase);
5927
5928 /* find klass */
5929 rb_autoload_load(cbase, id);
5930
5931 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5932 if (!vm_check_if_class(id, flags, super, klass))
5933 unmatched_redefinition("class", cbase, id, klass);
5934 return klass;
5935 }
5936 else {
5937 return vm_declare_class(id, flags, cbase, super);
5938 }
5939}
5940
5941static VALUE
5942vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5943{
5944 VALUE mod;
5945
5946 vm_check_if_namespace(cbase);
5947 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5948 if (!vm_check_if_module(id, mod))
5949 unmatched_redefinition("module", cbase, id, mod);
5950 return mod;
5951 }
5952 else {
5953 return vm_declare_module(id, cbase);
5954 }
5955}
5956
5957static VALUE
5958vm_find_or_create_class_by_id(ID id,
5959 rb_num_t flags,
5960 VALUE cbase,
5961 VALUE super)
5962{
5963 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5964
5965 switch (type) {
5966 case VM_DEFINECLASS_TYPE_CLASS:
5967 /* classdef returns class scope value */
5968 return vm_define_class(id, flags, cbase, super);
5969
5970 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5971 /* classdef returns class scope value */
5972 return rb_singleton_class(cbase);
5973
5974 case VM_DEFINECLASS_TYPE_MODULE:
5975 /* classdef returns class scope value */
5976 return vm_define_module(id, flags, cbase);
5977
5978 default:
5979 rb_bug("unknown defineclass type: %d", (int)type);
5980 }
5981}
5982
5983static rb_method_visibility_t
5984vm_scope_visibility_get(const rb_execution_context_t *ec)
5985{
5986 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5987
5988 if (!vm_env_cref_by_cref(cfp->ep)) {
5989 return METHOD_VISI_PUBLIC;
5990 }
5991 else {
5992 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5993 }
5994}
5995
5996static int
5997vm_scope_module_func_check(const rb_execution_context_t *ec)
5998{
5999 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6000
6001 if (!vm_env_cref_by_cref(cfp->ep)) {
6002 return FALSE;
6003 }
6004 else {
6005 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6006 }
6007}
6008
6009static void
6010vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
6011{
6012 VALUE klass;
6013 rb_method_visibility_t visi;
6014 rb_cref_t *cref = vm_ec_cref(ec);
6015
6016 if (is_singleton) {
6017 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
6018 visi = METHOD_VISI_PUBLIC;
6019 }
6020 else {
6021 klass = CREF_CLASS_FOR_DEFINITION(cref);
6022 visi = vm_scope_visibility_get(ec);
6023 }
6024
6025 if (NIL_P(klass)) {
6026 rb_raise(rb_eTypeError, "no class/module to add method");
6027 }
6028
6029 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
6030 // Set max_iv_count on the class based on the number of ivar sets in its initialize method
6031 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
6032 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
6033 }
6034
6035 if (!is_singleton && vm_scope_module_func_check(ec)) {
6036 klass = rb_singleton_class(klass);
6037 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
6038 }
6039}
6040
6041static VALUE
6042vm_invokeblock_i(struct rb_execution_context_struct *ec,
6043 struct rb_control_frame_struct *reg_cfp,
6044 struct rb_calling_info *calling)
6045{
6046 const struct rb_callinfo *ci = calling->cd->ci;
6047 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
6048
6049 if (block_handler == VM_BLOCK_HANDLER_NONE) {
6050 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
6051 }
6052 else {
6053 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
6054 }
6055}
6056
6057enum method_explorer_type {
6058 mexp_search_method,
6059 mexp_search_invokeblock,
6060 mexp_search_super,
6061};
6062
6063static inline VALUE
6064vm_sendish(
6065 struct rb_execution_context_struct *ec,
6066 struct rb_control_frame_struct *reg_cfp,
6067 struct rb_call_data *cd,
6068 VALUE block_handler,
6069 enum method_explorer_type method_explorer
6070) {
6071 VALUE val = Qundef;
6072 const struct rb_callinfo *ci = cd->ci;
6073 const struct rb_callcache *cc;
6074 int argc = vm_ci_argc(ci);
6075 VALUE recv = TOPN(argc);
6076 struct rb_calling_info calling = {
6077 .block_handler = block_handler,
6078 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6079 .recv = recv,
6080 .argc = argc,
6081 .cd = cd,
6082 };
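    /* Resolve the callee according to the explorer type, then invoke it
     * through the call cache's cached function pointer. invokeblock has no
     * call cache and dispatches through vm_invokeblock_i instead. */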
6083
6084 switch (method_explorer) {
6085 case mexp_search_method:
6086 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6087 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6088 break;
6089 case mexp_search_super:
6090 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6091 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6092 break;
6093 case mexp_search_invokeblock:
6094 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6095 break;
6096 }
6097 return val;
6098}
6099
6100VALUE
6101rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6102{
6103 stack_check(ec);
6104 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6105 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6106 VM_EXEC(ec, val);
6107 return val;
6108}
6109
6110VALUE
6111rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6112{
6113 stack_check(ec);
6114
6115 struct rb_forwarding_call_data adjusted_cd;
6116 struct rb_callinfo adjusted_ci;
6117
6118 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6119
6120 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6121
6122 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6123 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6124 }
6125
6126 VM_EXEC(ec, val);
6127 return val;
6128}
6129
6130VALUE
6131rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6132{
6133 stack_check(ec);
6134 VALUE bh = VM_BLOCK_HANDLER_NONE;
6135 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6136 VM_EXEC(ec, val);
6137 return val;
6138}
6139
6140VALUE
6141rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6142{
6143 stack_check(ec);
6144
6145 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6146 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6147
6148 VM_EXEC(ec, val);
6149 return val;
6150}
6151
6152VALUE
6153rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6154{
6155 stack_check(ec);
6156 struct rb_forwarding_call_data adjusted_cd;
6157 struct rb_callinfo adjusted_ci;
6158
6159 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6160
6161 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6162
6163 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6164 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6165 }
6166
6167 VM_EXEC(ec, val);
6168 return val;
6169}
6170
6171VALUE
6172rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6173{
6174 stack_check(ec);
6175 VALUE bh = VM_BLOCK_HANDLER_NONE;
6176 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6177 VM_EXEC(ec, val);
6178 return val;
6179}
6180
6181/* object.c */
6182VALUE rb_nil_to_s(VALUE);
6183VALUE rb_true_to_s(VALUE);
6184VALUE rb_false_to_s(VALUE);
6185/* numeric.c */
6186VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6187VALUE rb_fix_to_s(VALUE);
6188/* variable.c */
6189VALUE rb_mod_to_s(VALUE);
6191
6192static VALUE
6193vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6194{
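    /* Fast path for string interpolation (the objtostring insn): return a
     * String directly when the receiver's to_s is known to be the default
     * implementation; Qundef falls back to a regular method call. */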
6195 int type = TYPE(recv);
6196 if (type == T_STRING) {
6197 return recv;
6198 }
6199
6200 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
6201
6202 switch (type) {
6203 case T_SYMBOL:
6204 if (check_method_basic_definition(cme)) {
6205 // rb_sym_to_s() allocates a mutable string, but since we are only
6206 // going to use this string for interpolation, it's fine to use the
6207 // frozen string.
6208 return rb_sym2str(recv);
6209 }
6210 break;
6211 case T_MODULE:
6212 case T_CLASS:
6213 if (check_cfunc(cme, rb_mod_to_s)) {
6214 // rb_mod_to_s() allocates a mutable string, but since we are only
6215 // going to use this string for interpolation, it's fine to use the
6216 // frozen string.
6217 VALUE val = rb_mod_name(recv);
6218 if (NIL_P(val)) {
6219 val = rb_mod_to_s(recv);
6220 }
6221 return val;
6222 }
6223 break;
6224 case T_NIL:
6225 if (check_cfunc(cme, rb_nil_to_s)) {
6226 return rb_nil_to_s(recv);
6227 }
6228 break;
6229 case T_TRUE:
6230 if (check_cfunc(cme, rb_true_to_s)) {
6231 return rb_true_to_s(recv);
6232 }
6233 break;
6234 case T_FALSE:
6235 if (check_cfunc(cme, rb_false_to_s)) {
6236 return rb_false_to_s(recv);
6237 }
6238 break;
6239 case T_FIXNUM:
6240 if (check_cfunc(cme, rb_int_to_s)) {
6241 return rb_fix_to_s(recv);
6242 }
6243 break;
6244 }
6245 return Qundef;
6246}
6247
6248// The ZJIT implementation reuses this C function
6249// and therefore needs a non-static symbol to call
6250VALUE
6251rb_vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6252{
6253 return vm_objtostring(iseq, recv, cd);
6254}
6255
6256static VALUE
6257vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6258{
6259 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6260 return ary;
6261 }
6262 else {
6263 return Qundef;
6264 }
6265}
6266
6267static VALUE
6268vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6269{
6270 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6271 return hash;
6272 }
6273 else {
6274 return Qundef;
6275 }
6276}
6277
6278static VALUE
6279vm_opt_str_freeze(VALUE str, int bop, ID id)
6280{
6281 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6282 return str;
6283 }
6284 else {
6285 return Qundef;
6286 }
6287}
6288
6289/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6290#define id_cmp idCmp
6291
6292static VALUE
6293vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6294{
6295 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6296 return rb_ary_includes(ary, target);
6297 }
6298 else {
6299 VALUE args[1] = {target};
6300
6301 // Reproduce the duparray insn the optimization elided: dup the frozen literal so the redefined include? gets a fresh copy
6302 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6303 VALUE dupary = rb_ary_resurrect(ary);
6304
6305 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6306 }
6307}
6308
6309VALUE
6310rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6311{
6312 return vm_opt_duparray_include_p(ec, ary, target);
6313}
6314
6315static VALUE
6316vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6317{
6318 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6319 if (num == 0) {
6320 return Qnil;
6321 }
6322 else {
6323 VALUE result = *ptr;
6324 rb_snum_t i = num - 1;
6325 while (i-- > 0) {
6326 const VALUE v = *++ptr;
6327 if (OPTIMIZED_CMP(v, result) > 0) {
6328 result = v;
6329 }
6330 }
6331 return result;
6332 }
6333 }
6334 else {
6335 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6336 }
6337}
6338
6339VALUE
6340rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6341{
6342 return vm_opt_newarray_max(ec, num, ptr);
6343}
6344
6345static VALUE
6346vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6347{
6348 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6349 if (num == 0) {
6350 return Qnil;
6351 }
6352 else {
6353 VALUE result = *ptr;
6354 rb_snum_t i = num - 1;
6355 while (i-- > 0) {
6356 const VALUE v = *++ptr;
6357 if (OPTIMIZED_CMP(v, result) < 0) {
6358 result = v;
6359 }
6360 }
6361 return result;
6362 }
6363 }
6364 else {
6365 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6366 }
6367}
6368
6369VALUE
6370rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6371{
6372 return vm_opt_newarray_min(ec, num, ptr);
6373}
6374
6375static VALUE
6376vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6377{
6378 // If Array#hash is _not_ monkeypatched, use the optimized call
6379 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6380 return rb_ary_hash_values(num, ptr);
6381 }
6382 else {
6383 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6384 }
6385}
6386
6387VALUE
6388rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6389{
6390 return vm_opt_newarray_hash(ec, num, ptr);
6391}
6392
6393VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6394VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6395
6396static VALUE
6397vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6398{
6399 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6400 struct RArray fake_ary;
6401 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6402 return rb_ary_includes(ary, target);
6403 }
6404 else {
6405 VALUE args[1] = {target};
6406 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6407 }
6408}
6409
6410VALUE
6411rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6412{
6413 return vm_opt_newarray_include_p(ec, num, ptr, target);
6414}
6415
6416static VALUE
6417vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6418{
6419 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6420 struct RArray fake_ary;
6421 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6422 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6423 }
6424 else {
6425 // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
6426 // Set up the argv array with room for the keyword hash.
6427 VALUE args[2];
6428 args[0] = fmt;
6429 int kw_splat = RB_NO_KEYWORDS;
6430 int argc = 1;
6431
6432 if (!UNDEF_P(buffer)) {
6433 args[1] = rb_hash_new_with_size(1);
6434 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6435 kw_splat = RB_PASS_KEYWORDS;
6436 argc++;
6437 }
6438
6439 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6440 }
6441}
6442
6443VALUE
6444rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6445{
6446 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6447}
6448
6449VALUE
6450rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6451{
6452 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6453}
6454
6455#undef id_cmp
6456
6457static void
6458vm_track_constant_cache(ID id, void *ic)
6459{
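    /* vm->constant_cache maps each constant name (ID) to the set of inline
     * caches that depend on it, so redefining that constant can invalidate
     * exactly those caches. */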
6460 rb_vm_t *vm = GET_VM();
6461 struct rb_id_table *const_cache = vm->constant_cache;
6462 VALUE lookup_result;
6463 set_table *ics;
6464
6465 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6466 ics = (set_table *)lookup_result;
6467 }
6468 else {
6469 ics = set_init_numtable();
6470 rb_id_table_insert(const_cache, id, (VALUE)ics);
6471 }
6472
6473 /* The call below to set_insert could allocate, which could trigger a GC.
6474 * If it triggers a GC, it may free an iseq that also holds a cache to this
6475 * constant. If that iseq is the last iseq with a cache to this constant, then
6476 * it will free this set_table, which would cause a use-after-free during this
6477 * set_insert.
6478 *
6479 * So to fix this issue, we store the ID that is currently being inserted
6480 * and, in remove_from_constant_cache, we don't free the ST table for ID
6481 * equal to this one.
6482 *
6483 * See [Bug #20921].
6484 */
6485 vm->inserting_constant_cache_id = id;
6486
6487 set_insert(ics, (st_data_t)ic);
6488
6489 vm->inserting_constant_cache_id = (ID)0;
6490}
6491
6492static void
6493vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6494{
6495 RB_VM_LOCKING() {
6496 for (int i = 0; segments[i]; i++) {
6497 ID id = segments[i];
6498 if (id == idNULL) continue;
6499 vm_track_constant_cache(id, ic);
6500 }
6501 }
6502}
6503
6504// For JIT inlining
6505static inline bool
6506vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6507{
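    // Off the main ractor, a constant cache hit is only allowed when the
    // cached value is ractor-shareable.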
6508 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6509 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6510
6511 return (ic_cref == NULL || // no need to check CREF
6512 ic_cref == vm_get_cref(reg_ep));
6513 }
6514 return false;
6515}
6516
6517static bool
6518vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6519{
6520 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6521 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6522}
6523
6524// YJIT needs this function to never allocate and never raise
6525bool
6526rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6527{
6528 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6529}
6530
6531static void
6532vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6533{
6534 if (ruby_vm_const_missing_count > 0) {
6535 ruby_vm_const_missing_count = 0;
6536 ic->entry = NULL;
6537 return;
6538 }
6539
6540 struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6541 RB_OBJ_WRITE(ice, &ice->value, val);
6542 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6543 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6544 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6545
6546 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6547 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6548 rb_yjit_constant_ic_update(iseq, ic, pos);
6549}
6550
6551VALUE
6552rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6553{
6554 VALUE val;
6555 const ID *segments = ic->segments;
6556 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6557 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6558 val = ice->value;
6559
6560 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6561 }
6562 else {
6563 ruby_vm_constant_cache_misses++;
6564 val = vm_get_ev_const_chain(ec, segments);
6565 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6566 // Undo the PC increment to get the address of this instruction
6567 // INSN_ATTR(width) == 2
6568 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6569 }
6570 return val;
6571}
6572
6573static VALUE
6574vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6575{
6576 rb_thread_t *th = rb_ec_thread_ptr(ec);
6577 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
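    /* 0x1 is never a valid rb_thread_t pointer, so it doubles as a "done"
     * sentinel distinct from both NULL (not started) and a live thread. */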
6578
6579 again:
6580 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6581 return is->once.value;
6582 }
6583 else if (is->once.running_thread == NULL) {
6584 VALUE val;
6585 is->once.running_thread = th;
6586 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6587 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6588 /* is->once.running_thread is cleared by vm_once_clear() */
6589 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6590 return val;
6591 }
6592 else if (is->once.running_thread == th) {
6593 /* recursive once */
6594 return vm_once_exec((VALUE)iseq);
6595 }
6596 else {
6597 /* waiting for finish */
6598 RUBY_VM_CHECK_INTS(ec);
6599 rb_thread_schedule();
6600 goto again;
6601 }
6602}
6603
6604static OFFSET
6605vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6606{
6607 switch (OBJ_BUILTIN_TYPE(key)) {
6608 case -1:
6609 case T_FLOAT:
6610 case T_SYMBOL:
6611 case T_BIGNUM:
6612 case T_STRING:
6613 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6614 SYMBOL_REDEFINED_OP_FLAG |
6615 INTEGER_REDEFINED_OP_FLAG |
6616 FLOAT_REDEFINED_OP_FLAG |
6617 NIL_REDEFINED_OP_FLAG |
6618 TRUE_REDEFINED_OP_FLAG |
6619 FALSE_REDEFINED_OP_FLAG |
6620 STRING_REDEFINED_OP_FLAG)) {
6621 st_data_t val;
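            /* Normalize integral Float keys (e.g. 1.0) to Integer so they
             * hit the same CDHASH entry as the corresponding Integer
             * literal; Float and Integer hash differently, so 1.0 would
             * otherwise miss a `when 1` entry. */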
6622 if (RB_FLOAT_TYPE_P(key)) {
6623 double kval = RFLOAT_VALUE(key);
6624 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6625 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6626 }
6627 }
6628 if (rb_hash_stlike_lookup(hash, key, &val)) {
6629 return FIX2LONG((VALUE)val);
6630 }
6631 else {
6632 return else_offset;
6633 }
6634 }
6635 }
6636 return 0;
6637}
6638
6639NORETURN(static void
6640 vm_stack_consistency_error(const rb_execution_context_t *ec,
6641 const rb_control_frame_t *,
6642 const VALUE *));
6643static void
6644vm_stack_consistency_error(const rb_execution_context_t *ec,
6645 const rb_control_frame_t *cfp,
6646 const VALUE *bp)
6647{
6648 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6649 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6650 static const char stack_consistency_error[] =
6651 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6652#if defined RUBY_DEVEL
6653 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6654 rb_str_cat_cstr(mesg, "\n");
6655 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6656 rb_bug("%s", StringValueCStr(mesg));
6657#else
6658 rb_bug(stack_consistency_error, nsp, nbp);
6659#endif
6660}
6661
6662static VALUE
6663vm_opt_plus(VALUE recv, VALUE obj)
6664{
6665 if (FIXNUM_2_P(recv, obj) &&
6666 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6667 return rb_fix_plus_fix(recv, obj);
6668 }
6669 else if (FLONUM_2_P(recv, obj) &&
6670 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6671 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6672 }
6673 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6674 return Qundef;
6675 }
6676 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6677 RBASIC_CLASS(obj) == rb_cFloat &&
6678 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6679 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6680 }
6681 else if (RBASIC_CLASS(recv) == rb_cString &&
6682 RBASIC_CLASS(obj) == rb_cString &&
6683 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6684 return rb_str_opt_plus(recv, obj);
6685 }
6686 else if (RBASIC_CLASS(recv) == rb_cArray &&
6687 RBASIC_CLASS(obj) == rb_cArray &&
6688 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6689 return rb_ary_plus(recv, obj);
6690 }
6691 else {
6692 return Qundef;
6693 }
6694}
6695
6696static VALUE
6697vm_opt_minus(VALUE recv, VALUE obj)
6698{
6699 if (FIXNUM_2_P(recv, obj) &&
6700 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6701 return rb_fix_minus_fix(recv, obj);
6702 }
6703 else if (FLONUM_2_P(recv, obj) &&
6704 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6705 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6706 }
6707 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6708 return Qundef;
6709 }
6710 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6711 RBASIC_CLASS(obj) == rb_cFloat &&
6712 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6713 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6714 }
6715 else {
6716 return Qundef;
6717 }
6718}
6719
6720static VALUE
6721vm_opt_mult(VALUE recv, VALUE obj)
6722{
6723 if (FIXNUM_2_P(recv, obj) &&
6724 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6725 return rb_fix_mul_fix(recv, obj);
6726 }
6727 else if (FLONUM_2_P(recv, obj) &&
6728 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6729 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6730 }
6731 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6732 return Qundef;
6733 }
6734 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6735 RBASIC_CLASS(obj) == rb_cFloat &&
6736 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6737 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6738 }
6739 else {
6740 return Qundef;
6741 }
6742}
6743
6744static VALUE
6745vm_opt_div(VALUE recv, VALUE obj)
6746{
6747 if (FIXNUM_2_P(recv, obj) &&
6748 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6749 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6750 }
6751 else if (FLONUM_2_P(recv, obj) &&
6752 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6753 return rb_flo_div_flo(recv, obj);
6754 }
6755 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6756 return Qundef;
6757 }
6758 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6759 RBASIC_CLASS(obj) == rb_cFloat &&
6760 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6761 return rb_flo_div_flo(recv, obj);
6762 }
6763 else {
6764 return Qundef;
6765 }
6766}
6767
6768static VALUE
6769vm_opt_mod(VALUE recv, VALUE obj)
6770{
6771 if (FIXNUM_2_P(recv, obj) &&
6772 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6773 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6774 }
6775 else if (FLONUM_2_P(recv, obj) &&
6776 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6777 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6778 }
6779 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6780 return Qundef;
6781 }
6782 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6783 RBASIC_CLASS(obj) == rb_cFloat &&
6784 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6785 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6786 }
6787 else {
6788 return Qundef;
6789 }
6790}
6791
6792static VALUE
6793vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6794{
6795 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6796 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6797
6798 if (!UNDEF_P(val)) {
6799 return RBOOL(!RTEST(val));
6800 }
6801 }
6802
6803 return Qundef;
6804}
6805
6806static VALUE
6807vm_opt_lt(VALUE recv, VALUE obj)
6808{
6809 if (FIXNUM_2_P(recv, obj) &&
6810 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6811 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6812 }
6813 else if (FLONUM_2_P(recv, obj) &&
6814 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6815 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6816 }
6817 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6818 return Qundef;
6819 }
6820 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6821 RBASIC_CLASS(obj) == rb_cFloat &&
6822 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6823 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6824 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6825 }
6826 else {
6827 return Qundef;
6828 }
6829}
6830
6831static VALUE
6832vm_opt_le(VALUE recv, VALUE obj)
6833{
6834 if (FIXNUM_2_P(recv, obj) &&
6835 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6836 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6837 }
6838 else if (FLONUM_2_P(recv, obj) &&
6839 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6840 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6841 }
6842 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6843 return Qundef;
6844 }
6845 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6846 RBASIC_CLASS(obj) == rb_cFloat &&
6847 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6848 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6849 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6850 }
6851 else {
6852 return Qundef;
6853 }
6854}
6855
6856static VALUE
6857vm_opt_gt(VALUE recv, VALUE obj)
6858{
6859 if (FIXNUM_2_P(recv, obj) &&
6860 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6861 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6862 }
6863 else if (FLONUM_2_P(recv, obj) &&
6864 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6865 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6866 }
6867 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6868 return Qundef;
6869 }
6870 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6871 RBASIC_CLASS(obj) == rb_cFloat &&
6872 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6873 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6874 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6875 }
6876 else {
6877 return Qundef;
6878 }
6879}
6880
6881static VALUE
6882vm_opt_ge(VALUE recv, VALUE obj)
6883{
6884 if (FIXNUM_2_P(recv, obj) &&
6885 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6886 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6887 }
6888 else if (FLONUM_2_P(recv, obj) &&
6889 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6890 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6891 }
6892 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6893 return Qundef;
6894 }
6895 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6896 RBASIC_CLASS(obj) == rb_cFloat &&
6897 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6898 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6899 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6900 }
6901 else {
6902 return Qundef;
6903 }
6904}
6905
6906
6907static VALUE
6908vm_opt_ltlt(VALUE recv, VALUE obj)
6909{
6910 if (SPECIAL_CONST_P(recv)) {
6911 return Qundef;
6912 }
6913 else if (RBASIC_CLASS(recv) == rb_cString &&
6914 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6915 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6916 return rb_str_buf_append(recv, obj);
6917 }
6918 else {
6919 return rb_str_concat(recv, obj);
6920 }
6921 }
6922 else if (RBASIC_CLASS(recv) == rb_cArray &&
6923 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6924 return rb_ary_push(recv, obj);
6925 }
6926 else {
6927 return Qundef;
6928 }
6929}
6930
6931static VALUE
6932vm_opt_and(VALUE recv, VALUE obj)
6933{
6934 // If recv and obj are both fixnums, then the bottom tag bit
6935 // will be 1 on both. 1 & 1 == 1, so the result value will also
6936 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6937 // will be 0, and we return Qundef.
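    // Illustrative example: INT2FIX(3) == 0b0111 and INT2FIX(5) == 0b1011;
    // 0b0111 & 0b1011 == 0b0011 == INT2FIX(1), matching 3 & 5 == 1, with the
    // tag bit still set.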
6938 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6939
6940 if (FIXNUM_P(ret) &&
6941 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6942 return ret;
6943 }
6944 else {
6945 return Qundef;
6946 }
6947}
6948
6949static VALUE
6950vm_opt_or(VALUE recv, VALUE obj)
6951{
6952 if (FIXNUM_2_P(recv, obj) &&
6953 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6954 return recv | obj;
6955 }
6956 else {
6957 return Qundef;
6958 }
6959}
6960
6961static VALUE
6962vm_opt_aref(VALUE recv, VALUE obj)
6963{
6964 if (SPECIAL_CONST_P(recv)) {
6965 if (FIXNUM_2_P(recv, obj) &&
6966 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6967 return rb_fix_aref(recv, obj);
6968 }
6969 return Qundef;
6970 }
6971 else if (RBASIC_CLASS(recv) == rb_cArray &&
6972 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6973 if (FIXNUM_P(obj)) {
6974 return rb_ary_entry_internal(recv, FIX2LONG(obj));
6975 }
6976 else {
6977 return rb_ary_aref1(recv, obj);
6978 }
6979 }
6980 else if (RBASIC_CLASS(recv) == rb_cHash &&
6981 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6982 return rb_hash_aref(recv, obj);
6983 }
6984 else {
6985 return Qundef;
6986 }
6987}
6988
6989static VALUE
6990vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6991{
6992 if (SPECIAL_CONST_P(recv)) {
6993 return Qundef;
6994 }
6995 else if (RBASIC_CLASS(recv) == rb_cArray &&
6996 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6997 FIXNUM_P(obj)) {
6998 rb_ary_store(recv, FIX2LONG(obj), set);
6999 return set;
7000 }
7001 else if (RBASIC_CLASS(recv) == rb_cHash &&
7002 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
7003 rb_hash_aset(recv, obj, set);
7004 return set;
7005 }
7006 else {
7007 return Qundef;
7008 }
7009}
7010
7011static VALUE
7012vm_opt_length(VALUE recv, int bop)
7013{
7014 if (SPECIAL_CONST_P(recv)) {
7015 return Qundef;
7016 }
7017 else if (RBASIC_CLASS(recv) == rb_cString &&
7018 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7019 if (bop == BOP_EMPTY_P) {
7020 return LONG2NUM(RSTRING_LEN(recv));
7021 }
7022 else {
7023 return rb_str_length(recv);
7024 }
7025 }
7026 else if (RBASIC_CLASS(recv) == rb_cArray &&
7027 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7028 return LONG2NUM(RARRAY_LEN(recv));
7029 }
7030 else if (RBASIC_CLASS(recv) == rb_cHash &&
7031 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7032 return INT2FIX(RHASH_SIZE(recv));
7033 }
7034 else {
7035 return Qundef;
7036 }
7037}
7038
7039static VALUE
7040vm_opt_empty_p(VALUE recv)
7041{
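    /* vm_opt_length with BOP_EMPTY_P returns the collection's length as a
     * VALUE (or Qundef when unoptimizable), so INT2FIX(0) means empty. */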
7042 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7043 case Qundef: return Qundef;
7044 case INT2FIX(0): return Qtrue;
7045 default: return Qfalse;
7046 }
7047}
7048
7049VALUE rb_false(VALUE obj);
7050
7051static VALUE
7052vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7053{
7054 if (NIL_P(recv) &&
7055 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7056 return Qtrue;
7057 }
7058 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
7059 return Qfalse;
7060 }
7061 else {
7062 return Qundef;
7063 }
7064}
7065
7066static VALUE
7067fix_succ(VALUE x)
7068{
7069 switch (x) {
7070 case ~0UL:
7071 /* 0xFFFF_FFFF == INT2FIX(-1)
7072 * `-1.succ` is of course 0. */
7073 return INT2FIX(0);
7074 case RSHIFT(~0UL, 1):
7075 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7076 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7077 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7078 default:
7079 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7080 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7081 * == lx*2 + ly*2 + 1
7082 * == (lx*2+1) + (ly*2+1) - 1
7083 * == x + y - 1
7084 *
7085 * Here, if we put y := INT2FIX(1):
7086 *
7087 * == x + INT2FIX(1) - 1
7088 * == x + 2 .
7089 */
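        /* Concrete illustration: INT2FIX(3) == 7, and 7 + 2 == 9 == INT2FIX(4). */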
7090 return x + 2;
7091 }
7092}
7093
7094static VALUE
7095vm_opt_succ(VALUE recv)
7096{
7097 if (FIXNUM_P(recv) &&
7098 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7099 return fix_succ(recv);
7100 }
7101 else if (SPECIAL_CONST_P(recv)) {
7102 return Qundef;
7103 }
7104 else if (RBASIC_CLASS(recv) == rb_cString &&
7105 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7106 return rb_str_succ(recv);
7107 }
7108 else {
7109 return Qundef;
7110 }
7111}
7112
7113static VALUE
7114vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7115{
7116 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7117 return RBOOL(!RTEST(recv));
7118 }
7119 else {
7120 return Qundef;
7121 }
7122}
7123
7124static VALUE
7125vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7126{
7127 if (SPECIAL_CONST_P(recv)) {
7128 return Qundef;
7129 }
7130 else if (RBASIC_CLASS(recv) == rb_cString &&
7131 CLASS_OF(obj) == rb_cRegexp &&
7132 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7133 return rb_reg_match(obj, recv);
7134 }
7135 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7136 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7137 return rb_reg_match(recv, obj);
7138 }
7139 else {
7140 return Qundef;
7141 }
7142}
7143
7144rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7145
7146NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7147
7148static inline void
7149vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7150 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7151 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7152{
7153 rb_event_flag_t event = pc_events & target_event;
7154 VALUE self = GET_SELF();
7155
7156 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7157
7158 if (event & global_hooks->events) {
7159 /* increment PC because source line is calculated with PC-1 */
7160 reg_cfp->pc++;
7161 vm_dtrace(event, ec);
7162 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7163 reg_cfp->pc--;
7164 }
7165
7166 // Load here since global hook above can add and free local hooks
7167 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7168 if (local_hooks != NULL) {
7169 if (event & local_hooks->events) {
7170 /* increment PC because source line is calculated with PC-1 */
7171 reg_cfp->pc++;
7172 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7173 reg_cfp->pc--;
7174 }
7175 }
7176}
7177
7178#define VM_TRACE_HOOK(target_event, val) do { \
7179 if ((pc_events & (target_event)) & enabled_flags) { \
7180 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7181 } \
7182} while (0)
7183
7184static VALUE
7185rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7186{
7187 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7188 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7189 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7190}
7191
7192static void
7193vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7194{
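    /* Runs before every instruction compiled with a trace_ prefix: compute
     * the events registered at this pc, then fire global, iseq-local and
     * (for bmethod frames) method-local hooks, unless a trace hook is
     * already running (ec->trace_arg != NULL). */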
7195 const VALUE *pc = reg_cfp->pc;
7196 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7197 rb_event_flag_t global_events = enabled_flags;
7198
7199 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7200 return;
7201 }
7202 else {
7203 const rb_iseq_t *iseq = reg_cfp->iseq;
7204 VALUE iseq_val = (VALUE)iseq;
7205 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7206 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7207 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7208 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7209 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7210 rb_hook_list_t *bmethod_local_hooks = NULL;
7211 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7212 rb_event_flag_t bmethod_local_events = 0;
7213 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7214 enabled_flags |= iseq_local_events;
7215
7216 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7217
7218 if (bmethod_frame) {
7219 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7220 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7221 bmethod_local_hooks = me->def->body.bmethod.hooks;
7222 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7223 if (bmethod_local_hooks) {
7224 bmethod_local_events = bmethod_local_hooks->events;
7225 }
7226 }
7227
7228
7229 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7230#if 0
7231 /* disable trace */
7232 /* TODO: incomplete */
7233 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7234#else
7235 /* do not disable trace because of performance problem
7236 * (re-enable overhead)
7237 */
7238#endif
7239 return;
7240 }
7241 else if (ec->trace_arg != NULL) {
7242 /* already tracing */
7243 return;
7244 }
7245 else {
7246 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7247 /* Note, not considering iseq local events here since the same
7248 * iseq could be used in multiple bmethods. */
7249 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7250
7251 if (0) {
7252 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7253 (int)pos,
7254 (int)pc_events,
7255 RSTRING_PTR(rb_iseq_path(iseq)),
7256 (int)rb_iseq_line_no(iseq, pos),
7257 RSTRING_PTR(rb_iseq_label(iseq)));
7258 }
7259 VM_ASSERT(reg_cfp->pc == pc);
7260 VM_ASSERT(pc_events != 0);
7261
7262 /* check traces */
7263 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7264 /* b_call instruction running as a method. Fire call event. */
7265 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7266 }
7267 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7268 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7269 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7270 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7271 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7272 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7273 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7274 /* b_return instruction running as a method. Fire return event. */
7275 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7276 }
7277
7278 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7279 // We need the pointer to stay valid in case compaction happens in a trace hook.
7280 //
7281 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7282 // storage for `rb_method_definition_t` is not on the GC heap.
7283 RB_GC_GUARD(iseq_val);
7284 }
7285 }
7286}
7287#undef VM_TRACE_HOOK
7288
7289#if VM_CHECK_MODE > 0
7290NORETURN( NOINLINE( COLDFUNC
7291void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7292
7293void
7294Init_vm_stack_canary(void)
7295{
7296 /* This has to be called _after_ our PRNG is properly set up. */
7297 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7298 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7299
7300 vm_stack_canary_was_born = true;
7301 VM_ASSERT(n == 0);
7302}
7303
7304void
7305rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7306{
7307 /* Because a method has already been called, why not call
7308 * another one. */
7309 const char *insn = rb_insns_name(i);
7310 VALUE inspection = rb_inspect(c);
7311 const char *str = StringValueCStr(inspection);
7312
7313 rb_bug("dead canary found at %s: %s", insn, str);
7314}
7315
7316#else
7317void Init_vm_stack_canary(void) { /* nothing to do */ }
7318#endif
7319
7320
7321/* a part of the following code is generated by this ruby script:
7322
732316.times{|i|
7324 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7325 typedef_args.prepend(", ") if i != 0
7326 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7327 call_args.prepend(", ") if i != 0
7328 puts %Q{
7329static VALUE
7330builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7331{
7332 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7333 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7334}}
7335}
7336
7337puts
7338puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
733916.times{|i|
7340 puts " builtin_invoker#{i},"
7341}
7342puts "};"
7343*/

static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}
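
// Note: argc is trusted here; builtin functions can take at most 15 arguments
// because only the sixteen invokers above (0..15 arguments) exist, so no
// bounds check is performed.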

static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}

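// Backend of the invokebuiltin_delegate family of instructions: rather than
// evaluating arguments onto the stack, the instruction forwards a contiguous
// run of the calling iseq's locals, starting at start_index, straight to the
// builtin function.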
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
                          (void *)(uintptr_t)bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
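        // Locals are stored immediately below the VM_ENV_DATA_SIZE control
        // words that cfp->ep points into, so local #0 lives at
        // `ep - local_table_size - VM_ENV_DATA_SIZE + 1`; offsetting by
        // start_index yields a pointer to the first forwarded local.  The
        // builtin thus reads the frame's locals in place, with no copying.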
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}
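
// C code spliced in with __builtin_inline!() executes outside the iseq, so
// the generated loader code reads a Ruby local through this helper; index is
// the EP-relative slot computed at build time (negative for ordinary locals,
// which sit below ep, as in vm_invoke_builtin_delegate() above).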