/* Ruby 4.1.0dev (2026-04-22 revision cba70c3532c34803bae065745b799103635ec67a)
 * vm_insnhelper.c (cba70c3532c34803bae065745b799103635ec67a) */
/**********************************************************************

  vm_insnhelper.c - instruction helper functions. Included into vm.c.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
51ruby_vm_special_exception_copy(VALUE exc)
52{
54 rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));

/* Raise the VM's pre-allocated SystemStackError on `ec`.  When `setup`
 * is true, the template exception is copied and a backtrace is
 * attached; otherwise the shared template is raised as-is (used from
 * contexts where allocation is unsafe). */
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
73
NORETURN(static void vm_stackoverflow(void));

/* Raise a full (backtrace-carrying) stack overflow on the current EC. */
static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}
81
/* Public entry point for machine stack overflow.  During GC this is
 * fatal (likely a faulty native extension).  At or above the fatal
 * criticality level, raise the pre-allocated fatal exception without
 * any allocation; otherwise raise SystemStackError, attaching a
 * backtrace only when `crit` is below the signal level. */
void
rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit >= rb_stack_overflow_fatal) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
    ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
}
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
/* Check (under VM_CHECK_MODE >= 2) that `klass` can serve as a method
 * entry's defined_class: a module, an iclass wrapping a module, or a
 * class whose ancestry reaches BasicObject.  With VM_CHECK_MODE < 2
 * only non-zero is required. */
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
        /* fall through: iclass of a module is callable */
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
/* Validate one frame push against the per-magic requirement table:
 * `req_block` means specval must be a block handler (iff the frame has
 * VM_ENV_FLAG_LOCAL), `req_me` means cref_or_me must be a method entry
 * (forced on for bmethod frames), `req_cref` means it must be a cref,
 * and `is_cframe` must match the absence of a normal iseq. */
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq) //argument error
            );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}
201
/* Dispatch the per-magic requirement table to vm_check_frame_detail().
 * Columns: BLK = specval is a block handler (local frame), ME = needs a
 * method entry, CREF = needs a cref, CFRAME = C-level frame. */
static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
      /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
252
/* Diagnose a clobbered stack canary: identify the instruction that
 * overwrote sp[0] (the *previous* insn, since the PC has already been
 * advanced past it), dump the iseq disassembly, and abort. */
void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch?  */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting this large contents.  It
       is designed to run form a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
305
306#if USE_DEBUG_COUNTER
/* Bump debug counters on every frame push: total pushes, the
 * Ruby/C-to-Ruby/C transition matrix (R2R/R2C/C2R/C2C), and one
 * per-frame-magic counter. */
static void
vm_push_frame_debug_counter_inc(
        const struct rb_execution_context_struct *ec,
        const struct rb_control_frame_struct *reg_cfp,
        VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
// Return a poison value to be set above the stack top to verify leafness.
// Returns 0 when canary checking is compiled out (VM_CHECK_MODE == 0).
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
/* Push a new control frame onto `ec`'s VM stack: nil-fill `local_size`
 * local slots, lay down the three env-data words (me/cref, specval,
 * flags) so that ep ends up pointing at the flags word, then publish
 * the new cfp.  `stack_max` is the extra value-stack headroom the frame
 * may use; it is overflow-checked up front together with the locals. */
static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc = pc,
        .sp = sp,
        ._iseq = iseq,
        .self = self,
        .ep = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check = sp,
#endif
        .jit_return = NULL,
    };

    /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
       This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
       future/untested compilers/platforms. */

    #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
    atomic_signal_fence(memory_order_seq_cst);
    #endif

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
/* Pop `cfp` after running any pending interrupt checks.
 * return TRUE if the frame is finished (VM_FRAME_FLAG_FINISH was set) */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VMDEBUG == 2) SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}
457
/* Pop the current frame of `ec` (with interrupt checks). */
void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}
463
// it pushes pseudo-frame with fname filename.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    /* local forward declaration of an iseq.c helper that allocates a
     * body-less iseq whose path is `fname` (dummy frame for loading) */
    rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
    rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return (VALUE)dmy_iseq;
}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
502
/* Raise the ArgumentError built by rb_arity_error_new(). */
void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}
508
509/* lvar */
510
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

/* Write-barrier-aware env write: remember the env object for the GC,
 * force the slot write, then clear the WB-required flag so subsequent
 * writes to this env can take the fast path again. */
static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcely */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
535
/* Public wrapper around vm_env_write() for other translation units. */
void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
541
/* Convert a block handler into a Proc (Qnil when there is no block).
 * iseq/ifunc handlers are materialized via rb_vm_make_proc(); symbol
 * handlers go through Symbol#to_proc; proc handlers are returned as-is. */
VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
562
563/* svar */
564
565#if VM_CHECK_MODE > 0
566static int
567vm_svar_valid_p(VALUE svar)
568{
569 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
570 switch (imemo_type(svar)) {
571 case imemo_svar:
572 case imemo_cref:
573 case imemo_ment:
574 return TRUE;
575 default:
576 break;
577 }
578 }
579 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
580 return FALSE;
581}
582#endif
583
/* Fetch the svar slot for a local EP.  The root EP of an execution
 * context keeps its svar in ec->root_svar instead of inside the env. */
static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}
600
/* Store `svar` into the slot read by lep_svar(), using the appropriate
 * write barrier (env write vs. a direct ec->root_svar write). */
static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}
613
614static VALUE
615lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
616{
617 const struct vm_svar *svar = lep_svar(ec, lep);
618
619 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
620
621 switch (key) {
622 case VM_SVAR_LASTLINE:
623 return svar->lastline;
624 case VM_SVAR_BACKREF:
625 return svar->backref;
626 default: {
627 const VALUE ary = svar->others;
628
629 if (NIL_P(ary)) {
630 return Qnil;
631 }
632 else {
633 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
634 }
635 }
636 }
637}
638
/* Allocate a fresh svar imemo whose cref_or_me slot is `obj`, with all
 * special-variable fields initialized to nil.  The casts bypass the
 * const qualifiers on the freshly allocated struct's fields. */
static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}
649
/* Store special variable `key` = `val`, creating the svar on demand
 * (the slot's previous value — a cref/ment or Qfalse — is preserved
 * inside the new svar) and lazily creating the extras array for keys
 * beyond the built-in lastline/backref slots. */
static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
676
/* Read a special variable.  type == 0 reads an svar slot by `key`;
 * odd `type` values decode the named back-references $&, $`, $', $+
 * from (type >> 1); even values read the (type >> 1)-th regexp capture
 * from the current $~. */
static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
712
/* defined? support for back-references.  $+ has its own check;
 * $&, $`, $' and numbered captures reduce to rb_reg_nth_defined()
 * (group 0 for the named forms). */
static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
            break;
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758 rb_bug("check_method_entry: svar should not be there:");
759#endif
760 return NULL;
761 }
762}
763
765env_method_entry_unchecked(VALUE obj, int can_be_svar)
766{
767 if (obj == Qfalse) return NULL;
768
769 switch (imemo_type(obj)) {
770 case imemo_ment:
771 return (rb_callable_method_entry_t *)obj;
772 case imemo_cref:
773 return NULL;
774 case imemo_svar:
775 if (can_be_svar) {
776 return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
777 }
778 default:
779 return NULL;
780 }
781}
782
784rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
785{
786 const VALUE *ep = cfp->ep;
788
789 while (!VM_ENV_LOCAL_P(ep)) {
790 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
791 ep = VM_ENV_PREV_EP(ep);
792 }
793
794 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
795}
796
798rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
799{
800 const VALUE *ep = cfp->ep;
802
803 while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
804 if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
805 ep = VM_ENV_PREV_EP_UNCHECKED(ep);
806 }
807
808 return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
809}
810
811static const rb_iseq_t *
812method_entry_iseqptr(const rb_callable_method_entry_t *me)
813{
814 switch (me->def->type) {
815 case VM_METHOD_TYPE_ISEQ:
816 return me->def->body.iseq.iseqptr;
817 default:
818 return NULL;
819 }
820}
821
822static rb_cref_t *
823method_entry_cref(const rb_callable_method_entry_t *me)
824{
825 switch (me->def->type) {
826 case VM_METHOD_TYPE_ISEQ:
827 return me->def->body.iseq.cref;
828 default:
829 return NULL;
830 }
831}
832
833#if VM_CHECK_MODE == 0
834PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
835#endif
836static rb_cref_t *
837check_cref(VALUE obj, int can_be_svar)
838{
839 if (obj == Qfalse) return NULL;
840
841#if VM_CHECK_MODE > 0
842 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
843#endif
844
845 switch (imemo_type(obj)) {
846 case imemo_ment:
847 return method_entry_cref((rb_callable_method_entry_t *)obj);
848 case imemo_cref:
849 return (rb_cref_t *)obj;
850 case imemo_svar:
851 if (can_be_svar) {
852 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
853 }
854 default:
855#if VM_CHECK_MODE > 0
856 rb_bug("check_method_entry: svar should not be there:");
857#endif
858 return NULL;
859 }
860}
861
/* Innermost cref reachable from `ep`'s env chain (NULL when none). */
static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
874
875static int
876is_cref(const VALUE v, int can_be_svar)
877{
878 if (RB_TYPE_P(v, T_IMEMO)) {
879 switch (imemo_type(v)) {
880 case imemo_cref:
881 return TRUE;
882 case imemo_svar:
883 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
884 default:
885 break;
886 }
887 }
888 return FALSE;
889}
890
/* TRUE when the env chain's cref is stored directly as a cref (or via
 * an svar), rather than coming from a method entry. */
static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
900
/* If *vptr holds a cref, replace it with a duplicate — write-barriered
 * against `parent` when the slot belongs to a heap object, forced
 * otherwise.  Recurses once into an svar's cref_or_me when
 * `can_be_svar` is set.  Returns the duplicate, or NULL when the slot
 * holds no cref. */
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}
932
/* Duplicate the innermost cref of `ep`'s env chain in place, so it can
 * be mutated without affecting other frames that share the original.
 * Only escaped envs get a write-barriered parent. */
static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}
954
955static rb_cref_t *
956vm_get_cref(const VALUE *ep)
957{
958 rb_cref_t *cref = vm_env_cref(ep);
959
960 if (cref != NULL) {
961 return cref;
962 }
963 else {
964 rb_bug("vm_get_cref: unreachable");
965 }
966}
967
/* Public wrapper of vm_get_cref(). */
rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}
973
/* Cref of the nearest Ruby-level frame of `ec`, or NULL when there is
 * no such frame. */
static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}
984
/* Return a cref usable as a constant-cache key: the full cref when any
 * scope in the chain is dynamic or belongs to a cloned class (the cache
 * must then be cref-specific), otherwise NULL (cache valid globally). */
static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (CREF_DYNAMIC(cref) ||
            RCLASS_CLONED_P(CREF_CLASS(cref))) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* no dynamic singleton class or cloned class found */
    return NULL;
}
1002
/* Rebuild `cref`'s chain, substituting `new_klass` for the first link
 * whose class is `old_klass`.  Links up to and including the
 * substitution are copied; the remainder of the chain is shared via
 * vm_cref_new_use_prev(). */
rb_cref_t *
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
{
    rb_cref_t *new_cref_head = NULL;
    rb_cref_t *new_cref_tail = NULL;

    /* append `new_cref` to the copied chain, keeping head/tail updated */
    #define ADD_NEW_CREF(new_cref) \
    if (new_cref_tail) { \
        RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
    } \
    else { \
        new_cref_head = new_cref; \
    } \
    new_cref_tail = new_cref;

    while (cref) {
        rb_cref_t *new_cref;
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            ADD_NEW_CREF(new_cref);
            return new_cref_head;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        ADD_NEW_CREF(new_cref);
    }

    #undef ADD_NEW_CREF

    // Could we just reuse the original cref?
    return new_cref_head;
}
1035
/* Push a new public cref for `klass` on top of the cref found at `ep`
 * (or, when `ep` is NULL, at the Ruby-level caller frame of `ec`). */
static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}
1054
/* Class/module open for definition at the innermost lexical scope. */
static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}
1062
1063static inline VALUE
1064vm_get_const_base(const VALUE *ep)
1065{
1066 const rb_cref_t *cref = vm_get_cref(ep);
1067
1068 while (cref) {
1069 if (!CREF_PUSHED_BY_EVAL(cref)) {
1070 return CREF_CLASS_FOR_DEFINITION(cref);
1071 }
1072 cref = CREF_NEXT(cref);
1073 }
1074
1075 return Qundef;
1076}
1077
/* Raise TypeError unless `klass` is a class or a module. */
static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}
1085
/* Warn when `self` is a refinement module: the definition being made
 * lands on the outer class/module, not the refinement itself. */
static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}
1093
/* Placeholder: currently an identity function — the cref stores the
 * class itself, so no iclass resolution is performed here. */
static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}
1099
/* Constant lookup for getconstant-style instructions.
 * With `orig_klass` nil (and `allow_nil`), search the lexical scopes
 * from the current cref — triggering autoload as needed and rejecting
 * non-shareable values on non-main Ractors — then fall back to an
 * inheritance search from self's class.  Otherwise search from
 * `orig_klass`, honoring constant visibility (private constants).
 * When `is_defined` is set, return a defined?-style truthy value
 * instead of the constant's value. */
static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        /* skip eval-pushed scopes at the root */
        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        /* undefined entry marks an autoload; `am` guards
                         * against retrying the same class forever */
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%"PRIsVALUE" by non-main ractor.", rb_class_path(klass), rb_id2str(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
1183
1184VALUE
1185rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1186{
1187 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1188}
1189
1190static inline VALUE
1191vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1192{
1193 VALUE val = Qnil;
1194 int idx = 0;
1195 int allow_nil = TRUE;
1196 if (segments[0] == idNULL) {
1197 val = rb_cObject;
1198 idx++;
1199 allow_nil = FALSE;
1200 }
1201 while (segments[idx]) {
1202 ID id = segments[idx++];
1203 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1204 allow_nil = FALSE;
1205 }
1206 return val;
1207}
1208
1209
1210static inline VALUE
1211vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1212{
1213 VALUE klass;
1214
1215 if (!cref) {
1216 rb_bug("vm_get_cvar_base: no cref");
1217 }
1218
1219 while (CREF_NEXT(cref) &&
1220 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1221 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1222 cref = CREF_NEXT(cref);
1223 }
1224 if (top_level_raise && !CREF_NEXT(cref)) {
1225 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1226 }
1227
1228 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1229
1230 if (NIL_P(klass)) {
1231 rb_raise(rb_eTypeError, "no class variables available");
1232 }
1233 return klass;
1234}
1235
1236ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1237static inline void
1238fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1239{
1240 if (is_attr) {
1241 vm_cc_attr_index_set(cc, index, shape_id);
1242 }
1243 else {
1244 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1245 }
1246}
1247
/* True unless `cond` holds and `val` is NOT Ractor-shareable; i.e. `val`
 * only needs to pass the shareability check when `cond` is true.  The
 * second form derives `cond` from the shareability of a holder object. */
#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1252
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
/* Read instance variable `id` from `obj` using a shape-keyed cache: the
 * instruction's inline cache `ic` (plain ivar read) or the call cache `cc`
 * (attr_reader), selected by `is_attr`.  Returns `default_value` when the
 * ivar is not set; a `default_value` of Qundef signals a `defined?` style
 * query.  On a cache miss, the cache is refilled so the next access with
 * the same shape hits the fast path. */
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
    VALUE fields_obj;
#if OPT_IC_FOR_IVAR
    // Immediates (Fixnum, Symbol, nil, ...) can never carry ivars.
    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

    // Locate the object that actually stores the fields.
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        // Plain objects embed their own fields.
        fields_obj = obj;
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.

                if (default_value == Qundef) { // defined?
                    return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
                }
                else {
                    goto general_path;
                }
            }

            fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
            break;
        }
      default:
        // Generic objects keep their fields in an external imemo.
        fields_obj = rb_obj_fields(obj, id);
    }

    // No fields object at all: the ivar cannot be set.
    if (!fields_obj) {
        return default_value;
    }

    VALUE val = Qundef;

    shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
    VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);

    shape_id_t cached_id;
    attr_index_t index;

    // Read the cached (shape, index) pair atomically from the active cache.
    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        // Cache hit: the object's shape matches, so `index` is valid.
        RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));

        if (index == ATTR_INDEX_NOT_SET) {
            // Cached negative result: this shape has no such ivar.
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
            // Too-complex shape: fields live in an st_table keyed by ID,
            // not in the flat ivar array, and this path is never cached.
            st_table *table = (st_table *)ivar_list;

            RUBY_ASSERT(table);
            RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));

            if (!st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                // "ent" is the shared cache object
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                // Ivar absent in this shape: cache the negative result.
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    // Unless the caller asked for a defined?-style Qundef probe, the
    // result must be a real value by now.
    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    // Uncached fallback (non-main ractor reads on classes, or IC disabled).
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
1427ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430
/* Shared slow path for ivar writes: performs the full (frozen-checked)
 * write via rb_ivar_set_index, then repopulates the shape/index cache so
 * the next write with the same starting shape takes the fast path. */
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    // The fast paths never write to frozen objects; enforce it here.
    rb_check_frozen(obj);

    attr_index_t index = rb_ivar_set_index(obj, id, val);
    // Reload the shape: the write above may have transitioned it.
    shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);

    if (!rb_shape_too_complex_p(next_shape_id)) {
        populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
    return val;
#else
    return rb_ivar_set(obj, id, val);
#endif
}
1452
/* Out-of-line slow path for setinstancevariable (inline cache, no CC). */
static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}
1458
/* Out-of-line slow path for attr_writer dispatch (call cache, no IC). */
static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}
1464
NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for T_CLASS/T_MODULE receivers.  Returns the written
 * value on success, or Qundef to send the caller to the slow path (wrong
 * ractor, no fields object yet, or cache/shape mismatch). */
static VALUE
vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    // Only the main ractor may set ivars on classes/modules (see
    // vm_getivar); other ractors must go through the slow path.
    if (UNLIKELY(!rb_ractor_main_p())) {
        return Qundef;
    }

    VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
    if (UNLIKELY(!fields_obj)) {
        return Qundef;
    }

    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        // Cached transition: valid only if dest is the direct child created
        // by adding exactly this ivar, with no capacity change.
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    // Commit the shape transition on both the class and its fields object.
    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1507
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
/* Cached ivar write for generic (non-T_OBJECT, non-class) receivers whose
 * fields live in an external fields object.  Returns the written value on
 * success, or Qundef to send the caller to the slow path. */
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        // Cached transition: must be the direct child adding this exact
        // ivar with unchanged capacity, otherwise fall back.
        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    VALUE fields_obj = rb_obj_fields(obj, id);
    RUBY_ASSERT(fields_obj);
    RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);

    // Commit the shape transition on both the object and its fields object.
    if (shape_id != dest_shape_id) {
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
        RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
    }

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
1543
/* Fast-path ivar write for T_OBJECT receivers, driven by the cached
 * (dest_shape_id, index) pair.  Returns the written value on a cache hit,
 * or Qundef when the receiver type or shape does not match the cache and
 * the caller must use a type-specific or slow path. */
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            // A shareable T_OBJECT must be frozen, and the fast path never
            // writes to frozen objects (slow path re-checks and raises).
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));

            if (LIKELY(shape_id == dest_shape_id)) {
                // Overwrite of an existing ivar: no shape transition.
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                // Cached transition: apply it only if dest is the direct
                // child adding this exact ivar with unchanged capacity.
                if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    RBASIC_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_FIELDS(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
        /* fallthrough: classes/modules are handled by vm_setivar_class */
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
1596
/* Look up class variable `id` starting at `klass`, then refresh the
 * per-class cvar cache entry and point the instruction's ICVARC at it.
 * Returns the class variable's value. */
static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    // Normalize an include wrapper back to the real defining module.
    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    // rb_cvar_find succeeded, so the defining class must have a cvar
    // cache table with an entry for `id`; anything else is a VM bug.
    VALUE rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_marked_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    // Stamp the entry with the current global cvar state and the cref the
    // lookup was performed under, then publish it to the inline cache.
    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    RB_OBJ_WRITE((VALUE)ent, &ent->cref, cref);
    ic->entry = ent;

    // Write barriers: the iseq now references the class, which references
    // the cref, via this cache entry.
    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}
1629
/* getclassvariable instruction body: serve the read from the inline cache
 * when it is still valid (same global cvar state, same cref, main ractor),
 * otherwise resolve the cvar base class and refresh the cache. */
static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        // A valid cache entry guarantees the ivar-backed cvar exists.
        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}
1649
/* Exported (non-static) wrapper around vm_getclassvariable. */
VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}
1655
/* setclassvariable instruction body: write through the inline cache when
 * it is still valid (same global cvar state, same cref, main ractor),
 * otherwise resolve the cvar base class, write, and refresh the cache. */
static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}
1675
/* Exported (non-static) wrapper around vm_setclassvariable. */
void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}
1681
ALWAYS_INLINE(static VALUE vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic));
/* getinstancevariable instruction body: an unset ivar reads as nil. */
static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}
1688
1689static inline void
1690vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1691{
1692 if (RB_SPECIAL_CONST_P(obj)) {
1694 return;
1695 }
1696
1697 shape_id_t dest_shape_id;
1698 attr_index_t index;
1699 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1700
1701 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1702 switch (BUILTIN_TYPE(obj)) {
1703 case T_OBJECT:
1704 break;
1705 case T_CLASS:
1706 case T_MODULE:
1707 if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
1708 return;
1709 }
1710 break;
1711 default:
1712 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1713 return;
1714 }
1715 }
1716 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1717 }
1718}
1719
/* Exported (non-static) wrapper around vm_setinstancevariable. */
void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}
1725
/* Exported (non-static) wrapper around vm_getinstancevariable. */
VALUE
rb_vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getinstancevariable(iseq, obj, id, ic);
}
1731
1732static VALUE
1733vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1734{
1735 /* continue throw */
1736
1737 if (FIXNUM_P(err)) {
1738 ec->tag->state = RUBY_TAG_FATAL;
1739 }
1740 else if (SYMBOL_P(err)) {
1741 ec->tag->state = TAG_THROW;
1742 }
1743 else if (THROW_DATA_P(err)) {
1744 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1745 }
1746 else {
1747 ec->tag->state = TAG_RAISE;
1748 }
1749 return err;
1750}
1751
/* Start a new non-local jump (break/retry/return/raise...) from the frame
 * `reg_cfp`.  Finds the control frame that should catch it (`escape_cfp`),
 * validates that the jump has a live destination (raising LocalJumpError
 * for orphaned break/return), sets the tag state, and returns a throw-data
 * imemo carrying the payload and target frame.  A non-zero `flag`
 * (VM_THROW_NO_ESCAPE_FLAG) skips target resolution entirely. */
static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        // Walk out to the enclosing block iseq; `break` targets the frame
        // that invoked the block.
        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = CFP_ISEQ(escape_cfp);
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(CFP_ISEQ(escape_cfp) == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            // break inside a lambda behaves like return from the lambda.
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            // The break is live only if the invoking frame is still on the
            // stack AND its catch table has a matching CATCH_TYPE_BREAK
            // entry whose continuation is the current return PC.
            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = CFP_ISEQ(escape_cfp);
                    const VALUE epc = CFP_PC(escape_cfp) - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;
                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        // retry jumps to the frame owning the enclosing (parent) env.
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        // target_lep: the local EP of the method the return belongs to;
        // target_ep: the innermost lambda env on the way out, if any.
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        // Scan live frames outward for a valid return destination.
        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        // The return is valid only if this lambda frame is
                        // the innermost lambda seen from the return site.
                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(CFP_ISEQ(escape_cfp))->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        // Top-level `return` terminates the script, but not
                        // from inside a class body or through a lambda.
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        // eval frames are transparent: look through nested
                        // rescue/ensure/eval to what hosts the eval.
                        const rb_iseq_t *is = CFP_ISEQ(escape_cfp);
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            // Ordinary case: return escapes to the enclosing method frame.
            if (escape_cfp->ep == target_lep && ISEQ_BODY(CFP_ISEQ(escape_cfp))->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        // NOTE(review): "isns" looks like a typo for "insns" in this
        // message; left untouched here (runtime string).
        rb_bug("isns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}
1938
1939static VALUE
1940vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1941 rb_num_t throw_state, VALUE throwobj)
1942{
1943 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1944 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1945
1946 if (state != 0) {
1947 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1948 }
1949 else {
1950 return vm_throw_continue(ec, throwobj);
1951 }
1952}
1953
/* Exported (non-static) wrapper around vm_throw. */
VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}
1959
/* expandarray instruction body: push `num` destructured elements of `ary`
 * onto the VM stack (padding with nil when the array is short).
 * flag & 0x01: also push a splat array collecting the rest;
 * flag & 0x02: "post" mode — elements are pushed in reverse, taken from
 * the tail of the array, with the splat (if any) collecting the head.
 * A non-array that does not respond to to_ary is treated as [obj]. */
static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        // Not convertible: treat the original object as a 1-element array.
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        // Pad with nil first when fewer elements than requested.
        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        // Push the last `num` elements, from the tail inward.
        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            // Splat collects whatever precedes the post elements.
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            // Push nil padding, then the available elements in reverse.
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    // Keep `ary` alive: `ptr` may point into its heap storage.
    RB_GC_GUARD(ary);
}
2030
2031static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2032
2033static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2034
/* Allocate an empty call-cache-entries (CCS) bucket for (klass, mid) and
 * register it in `cc_tbl`.  The bucket owns a small growable array of
 * (ci-flags, argc) -> callcache entries for one method entry `cme`. */
static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    int initial_capa = 2;
    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
#if VM_CHECK_MODE > 0
    // Debug signature used by vm_ccs_p()-style sanity checks.
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = initial_capa;
    ccs->len = 0;
    ccs->cme = cme;
    // Mark the method entry as cached so invalidation knows to sweep it.
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);

    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
    return ccs;
}
2052
/* Append a (ci, cc) pair to a CCS bucket, growing the bucket (and
 * re-registering the moved pointer in `cc_tbl`) when it is full.
 * Unmarkable call caches are not retained. */
static void
vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        // Double the bucket; realloc may move it, so the table entry is
        // re-inserted below with the new address.
        RUBY_ASSERT(ccs->capa > 0);
        ccs->capa *= 2;
        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
#if VM_CHECK_MODE > 0
        ccs->debug_sig = ~(VALUE)ccs;
#endif
        // GC?
        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    }
    VM_ASSERT(ccs->len < ccs->capa);

    // Entries are keyed by (argc, flag); the mid is implied by the bucket.
    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}
2082
2083#if VM_CHECK_MODE > 0
/* Debug helper (VM_CHECK_MODE only): print a CCS bucket and its entries. */
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i=0; i<ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}
2095
/* Debug helper (VM_CHECK_MODE only): assert the invariants of a CCS
 * bucket — every entry is a callcache for `klass` sharing the bucket's
 * cme, and none is a super/refinement cache.  Always returns TRUE. */
static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i=0; i<ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
2113#endif
2114
2115const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2116
/* Remove the CCS bucket for `mid` from a class's call-cache table because
 * its method entry was invalidated.  Must hold the VM lock.  Under
 * multi-ractor execution the table is copy-on-write: the update is done on
 * a duplicate and published atomically, and the eviction is abandoned if
 * another ractor already replaced the table or the entry. */
static void
vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
{
    ASSERT_vm_locking();

    if (rb_multi_ractor_p()) {
        if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
            // Another ractor updated the CC table while we were waiting on the VM lock.
            // We have to retry.
            return;
        }

        VALUE ccs_obj = 0;
        rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
        struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;

        if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
            // Another ractor replaced that entry while we were waiting on the VM lock.
            return;
        }

        VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
        rb_vm_cc_table_delete(new_table, mid);
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
    }
    else {
        // Single ractor: safe to mutate the table in place.
        rb_vm_cc_table_delete(cc_tbl, mid);
    }
}
2146
/* Create and register a call cache for (klass, ci) after a lookup miss.
 * Must hold the VM lock.  Resolves the callable method entry, finds or
 * creates the CCS bucket for the mid, and pushes a fresh callcache into
 * it.  Returns the shared empty cache when the method does not exist.
 * Under multi-ractor execution the CC table is updated copy-on-write and
 * published atomically. */
static const struct rb_callcache *
vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    ASSERT_vm_locking();

    RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);

    const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);

    VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));

    if (cme == NULL) {
        // undef or not found: can't cache the information
        VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
        return &vm_empty_cc;
    }

    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
    const VALUE original_cc_table = cc_tbl;
    if (!cc_tbl) {
        // Is this possible after rb_callable_method_entry ?
        cc_tbl = rb_vm_cc_table_create(1);
    }
    else if (rb_multi_ractor_p()) {
        // Copy-on-write: other ractors may be reading the current table.
        cc_tbl = rb_vm_cc_table_dup(cc_tbl);
    }

    VM_ASSERT(cme == rb_callable_method_entry(klass, mid));

    METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);

    VM_ASSERT(cc_tbl);

    struct rb_class_cc_entries *ccs = NULL;
    {
        VALUE ccs_obj;
        if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
        }
        else {
            // TODO: required?
            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
        }
    }

    // May substitute a specialized (overloaded) cme for this call site.
    cme = rb_check_overloaded_cme(cme, ci);

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
    vm_ccs_push(cc_tbl, mid, ccs, ci, cc);

    VM_ASSERT(vm_cc_cme(cc) != NULL);
    VM_ASSERT(cme->called_id == mid);
    VM_ASSERT(vm_cc_cme(cc)->called_id == mid);

    // Publish the new/duplicated table only if one was created above.
    if (original_cc_table != cc_tbl) {
        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
    }

    return cc;
}
2207
/* Lock-free lookup of an existing call cache for (klass, ci): load the
 * class's CC table, find the CCS bucket for the mid, and scan it for an
 * entry matching the call info's argc and flags.  An invalidated bucket
 * is evicted under the VM lock and the lookup retried.  Returns NULL when
 * no matching cache exists (caller then populates one). */
static const struct rb_callcache *
vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
{
    VALUE cc_tbl;
    struct rb_class_cc_entries *ccs;
  retry:
    // Atomic load: other ractors may swap the table concurrently.
    cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
    ccs = NULL;

    if (cc_tbl) {
        // CCS data is keyed on method id, so we don't need the method id
        // for doing comparisons in the `for` loop below.

        VALUE ccs_obj;
        if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
            ccs = (struct rb_class_cc_entries *)ccs_obj;
            const int ccs_len = ccs->len;

            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
                // Stale bucket: evict it under the lock, then start over.
                RB_VM_LOCKING() {
                    vm_evict_cc(klass, cc_tbl, mid);
                }
                goto retry;
            }
            else {
                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));

                // We already know the method id is correct because we had
                // to look up the ccs_data by method id. All we need to
                // compare is argc and flag
                unsigned int argc = vm_ci_argc(ci);
                unsigned int flag = vm_ci_flag(ci);

                for (int i=0; i<ccs_len; i++) {
                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;

                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));

                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);

                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
                        VM_ASSERT(ccs_cc->klass == klass);
                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));

                        return ccs_cc;
                    }
                }
            }
        }
    }

    // Keep the table alive across the unlocked scan above.
    RB_GC_GUARD(cc_tbl);
    return NULL;
}
2265
/* Find (or create) the call cache for (klass, ci).  The fast path is a
 * lock-free lookup; on a miss the VM lock is taken, the lookup repeated
 * (another ractor may have populated it meanwhile), and the cache created
 * if still absent. */
static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
{
    const ID mid = vm_ci_mid(ci);

    const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
    if (cc) {
        return cc;
    }

    RB_VM_LOCKING() {
        if (rb_multi_ractor_p()) {
            // The CC may have been populated by another ractor while we were waiting on the lock,
            // so we must lookup a second time.
            cc = vm_lookup_cc(klass, ci, mid);
        }

        if (!cc) {
            cc = vm_populate_cc(klass, ci, mid);
        }
    }

    return cc;
}
2290
/* Exported method-search slow path: resolve the call cache for `ci` on
 * `klass`, asserting the returned cache's invariants (valid imemo, right
 * class/mid, non-invalidated cme) in check builds. */
const struct rb_callcache *
rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
{
    const struct rb_callcache *cc;

    VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);

    cc = vm_search_cc(klass, ci);

    VM_ASSERT(cc);
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
    VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
    VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));

    return cc;
}
2309
/* Slow-path search that also refreshes the inline cache slot in `cd`.
 * `cd_owner` is the object (usually an iseq) embedding `cd`; it gets a
 * write-barrier notification so GC can track the newly cached cc. */
static const struct rb_callcache *
vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
#if USE_DEBUG_COUNTER
    const struct rb_callcache *old_cc = cd->cc;
#endif

    const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);

#if OPT_INLINE_METHOD_CACHE
    cd->cc = cc;

    /* the shared empty cc is immortal, so no write barrier is needed for it */
    const struct rb_callcache *empty_cc = &vm_empty_cc;
    if (cd_owner && cc != empty_cc) {
        RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
    }

#if USE_DEBUG_COUNTER
    /* classify why the inline cache missed, for debug statistics only */
    if (!old_cc || old_cc == empty_cc) {
        // empty
        RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
    }
    else if (old_cc == cc) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
    }
    else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
    }
    else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
             vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
    }
#endif
#endif // OPT_INLINE_METHOD_CACHE

    VM_ASSERT(vm_cc_cme(cc) == NULL ||
              vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));

    return cc;
}
2353
ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass));
/* Inline-cache hit path: reuse cd->cc when it is still for `klass` and its
 * method entry has not been invalidated; otherwise fall back to the slow
 * path (which also refreshes the cache). */
static const struct rb_callcache *
vm_search_method_fastpath(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE klass)
{
    const struct rb_callcache *cc = cd->cc;

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(vm_cc_class_check(cc, klass))) {
        if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
            VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
            RB_DEBUG_COUNTER_INC(mc_inline_hit);
            VM_ASSERT(vm_cc_cme(cc) == NULL ||                        // not found
                      (vm_ci_flag(cd->ci) & VM_CALL_SUPER) ||         // search_super w/ define_method
                      vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid

            return cc;
        }
        RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
    }
#endif

    /* miss: search again and store the result back into cd->cc */
    return vm_search_method_slowpath0((VALUE)CFP_ISEQ(reg_cfp), cd, klass);
}
2380
/* Resolve the callable method entry for dispatching `cd` on `recv`,
 * going through the per-call-site inline cache. May return NULL when
 * the method is not found (empty cc). */
static const struct rb_callable_method_entry_struct *
vm_search_method(struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
{
    VALUE klass = CLASS_OF(recv);
    VM_ASSERT(klass != Qfalse);
    VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));

    const struct rb_callcache *cc = vm_search_method_fastpath(reg_cfp, cd, klass);
    return vm_cc_cme(cc);
}
2391
2393rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2394{
2395 // Called from ZJIT with the compile-time iseq, which may differ from
2396 // the iseq on the current CFP. Use the slowpath to avoid stale caches.
2397 VALUE klass = CLASS_OF(recv);
2398 const struct rb_callcache *cc = vm_search_method_slowpath0(cd_owner, cd, klass);
2399 return vm_cc_cme(cc);
2400}
2401
2402#if __has_attribute(transparent_union)
2403typedef union {
2404 VALUE (*anyargs)(ANYARGS);
2405 VALUE (*f00)(VALUE);
2406 VALUE (*f01)(VALUE, VALUE);
2407 VALUE (*f02)(VALUE, VALUE, VALUE);
2408 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2409 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2410 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2411 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2412 VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2421 VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2422} __attribute__((__transparent_union__)) cfunc_type;
2423# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2424#else
2425typedef VALUE (*cfunc_type)(ANYARGS);
2426# define make_cfunc_type(f) (cfunc_type)(f)
2427#endif
2428
/* True iff `me` is a CFUNC method whose implementation is exactly `func`.
 * Used to detect unredefined core methods (e.g. Object#== being
 * rb_obj_equal) so the VM can take identity-based fast paths. */
static inline int
check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
{
    if (! me) {
        return false;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
        VM_ASSERT(callable_method_entry_p(me));
        VM_ASSERT(me->def);
        if (me->def->type != VM_METHOD_TYPE_CFUNC) {
            return false;
        }
        else {
#if __has_attribute(transparent_union)
            /* compare via the anyargs member; all union members share storage */
            return me->def->body.cfunc.func == func.anyargs;
#else
            return me->def->body.cfunc.func == func;
#endif
        }
    }
}
2451
2452static inline int
2453check_method_basic_definition(const rb_callable_method_entry_t *me)
2454{
2455 return me && METHOD_ENTRY_BASIC(me);
2456}
2457
2458static inline int
2459vm_method_cfunc_is(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv, cfunc_type func)
2460{
2461 VM_ASSERT(reg_cfp != NULL);
2462 const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);
2463 return check_cfunc(cme, func);
2464}
2465
/* ZJIT-facing wrapper: is `me` a CFUNC method implemented by `func`? */
bool
rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
{
    return check_cfunc(me, func);
}
2471
/* JIT-facing variant of vm_method_cfunc_is that takes the compile-time
 * iseq explicitly instead of reading it from the current control frame. */
int
rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
{
    // Called from ZJIT with the compile-time iseq, which may differ from
    // the iseq on the current CFP. Use the slowpath to avoid stale caches.
    VALUE klass = CLASS_OF(recv);
    const struct rb_callcache *cc = vm_search_method_slowpath0((VALUE)iseq, cd, klass);
    const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc);
    return check_cfunc(cme, func);
}
2482
2483#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2484#define vm_method_cfunc_is(reg_cfp, cd, recv, func) vm_method_cfunc_is(reg_cfp, cd, recv, make_cfunc_type(func))
2485
2486#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2487
2488static inline bool
2489FIXNUM_2_P(VALUE a, VALUE b)
2490{
2491 /* FIXNUM_P(a) && FIXNUM_P(b)
2492 * == ((a & 1) && (b & 1))
2493 * == a & b & 1 */
2494 SIGNED_VALUE x = a;
2495 SIGNED_VALUE y = b;
2496 SIGNED_VALUE z = x & y & 1;
2497 return z == 1;
2498}
2499
2500static inline bool
2501FLONUM_2_P(VALUE a, VALUE b)
2502{
2503#if USE_FLONUM
2504 /* FLONUM_P(a) && FLONUM_P(b)
2505 * == ((a & 3) == 2) && ((b & 3) == 2)
2506 * == ! ((a ^ 2) | (b ^ 2) & 3)
2507 */
2508 SIGNED_VALUE x = a;
2509 SIGNED_VALUE y = b;
2510 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2511 return !z;
2512#else
2513 return false;
2514#endif
2515}
2516
/* Specialized fast paths for `==`. Returns Qtrue/Qfalse when a fast path
 * applies (and the relevant class's #== is unredefined), or Qundef when
 * the caller must fall back to a full method dispatch. */
static VALUE
opt_equality_specialized(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
        goto compare_by_identity;
    }
    else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
        goto compare_by_identity;
    }
    else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
        goto compare_by_identity;
    }
    else if (SPECIAL_CONST_P(recv)) {
        // other special constants: no fast path; fall through to Qundef
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
        double a = RFLOAT_VALUE(recv);
        double b = RFLOAT_VALUE(obj);

        return RBOOL(a == b);
    }
    else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
        if (recv == obj) {
            return Qtrue;
        }
        else if (RB_TYPE_P(obj, T_STRING)) {
            return rb_str_eql_internal(obj, recv);
        }
    }
    return Qundef;

  compare_by_identity:
    return RBOOL(recv == obj);
}
2551
2552static VALUE
2553opt_equality(struct rb_control_frame_struct *reg_cfp, VALUE recv, VALUE obj, CALL_DATA cd)
2554{
2555 VM_ASSERT(reg_cfp != NULL);
2556
2557 VALUE val = opt_equality_specialized(recv, obj);
2558 if (!UNDEF_P(val)) return val;
2559
2560 if (!vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_equal)) {
2561 return Qundef;
2562 }
2563 else {
2564 return RBOOL(recv == obj);
2565 }
2566}
2567
2568#undef EQ_UNREDEFINED_P
2569
2570static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2571NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2572
/* Slow path for rb_equal_opt/rb_eql_opt: look up `mid` on recv's class via
 * the global call-cache table and answer by identity only when it resolves
 * to the default rb_obj_equal; otherwise return Qundef so the caller falls
 * back to a full method call. */
static VALUE
opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
{
    const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));

    if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
        return RBOOL(recv == obj);
    }
    else {
        return Qundef;
    }
}
2585
2586static VALUE
2587opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2588{
2589 VALUE val = opt_equality_specialized(recv, obj);
2590 if (!UNDEF_P(val)) {
2591 return val;
2592 }
2593 else {
2594 return opt_equality_by_mid_slowpath(recv, obj, mid);
2595 }
2596}
2597
/* Optimized `==`: returns Qtrue/Qfalse on a fast path, Qundef otherwise. */
VALUE
rb_equal_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEq);
}
2603
/* Optimized `eql?`: returns Qtrue/Qfalse on a fast path, Qundef otherwise. */
VALUE
rb_eql_opt(VALUE obj1, VALUE obj2)
{
    return opt_equality_by_mid(obj1, obj2, idEqlP);
}
2609
2610extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2611extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2612
/* Implements the checkmatch instruction's pattern test.
 * WHEN: the pattern itself decides truthiness (no #=== call).
 * RESCUE: like CASE but the pattern must be a class/module.
 * CASE: call pattern === target (honoring refinements). */
static VALUE
check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through */
      case VM_CHECKMATCH_TYPE_CASE: {
        return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
2631
2632
2633static inline VALUE
2634double_cmp_lt(double a, double b)
2635{
2636 return RBOOL(a < b);
2637}
2638
2639static inline VALUE
2640double_cmp_le(double a, double b)
2641{
2642 return RBOOL(a <= b);
2643}
2644
2645static inline VALUE
2646double_cmp_gt(double a, double b)
2647{
2648 return RBOOL(a > b);
2649}
2650
2651static inline VALUE
2652double_cmp_ge(double a, double b)
2653{
2654 return RBOOL(a >= b);
2655}
2656
// Copied by vm_dump.c
/* Compute the base pointer (start of the argument/local area) of a Ruby
 * frame from the previous frame's sp. Returns NULL for non-Ruby frames. */
static inline VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
    const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (CFP_ISEQ(cfp) && VM_FRAME_RUBYFRAME_P(cfp)) {
        VALUE *bp = prev_cfp->sp + ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size + VM_ENV_DATA_SIZE;

        /* forwardable iseqs stash the caller's CI as an extra local; its
         * argc enlarges the frame beyond local_table_size */
        if (ISEQ_BODY(CFP_ISEQ(cfp))->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
            int lts = ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size;
            int params = ISEQ_BODY(CFP_ISEQ(cfp))->param.size;

            CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
            bp += vm_ci_argc(ci);
        }

        if (ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
            /* adjust `self' */
            bp += 1;
        }
#if VM_DEBUG_BP_CHECK
        if (bp != cfp->bp_check) {
            ruby_debug_printf("bp_check: %ld, bp: %ld\n",
                              (long)(cfp->bp_check - GET_EC()->vm_stack),
                              (long)(bp - GET_EC()->vm_stack));
            rb_bug("vm_base_ptr: unreachable");
        }
#endif
        return bp;
    }
    else {
        return NULL;
    }
}
2692
/* Public wrapper around vm_base_ptr for use outside this file. */
VALUE *
rb_vm_base_ptr(const rb_control_frame_t *cfp)
{
    return vm_base_ptr(cfp);
}
2698
2699/* method call processes with call_info */
2700
2701#include "vm_args.c"
2702
2703static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2704ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2705static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2706static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2707static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2708static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2709static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2710
2711static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2712
/* Fastpath handler: tailcall into an iseq starting at opt_pc == 0
 * (no optional parameters consumed). */
static VALUE
vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
}
2720
2721static VALUE
2722vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2723{
2724 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2725
2726 const struct rb_callcache *cc = calling->cc;
2727 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2728 int param = ISEQ_BODY(iseq)->param.size;
2729 int local = ISEQ_BODY(iseq)->local_table_size;
2730 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2731}
2732
2733bool
2734rb_simple_iseq_p(const rb_iseq_t *iseq)
2735{
2736 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2737 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2738 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2739 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2740 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2741 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2742 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2743 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2744 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2745}
2746
2747bool
2748rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2749{
2750 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2751 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2752 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2753 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2754 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2757 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2758 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2759}
2760
2761bool
2762rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2763{
2764 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2765 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2766 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2767 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2768 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2769 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2770 ISEQ_BODY(iseq)->param.flags.has_block == FALSE &&
2771 ISEQ_BODY(iseq)->param.flags.accepts_no_block == FALSE;
2772}
2773
2774#define ALLOW_HEAP_ARGV (-2)
2775#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2776
/* Expand a splatted array `ary` onto the VM stack (or into a hidden heap
 * array when max_args allows it and the expansion would overflow the
 * VM-stack argument limit). Returns true when arguments were truncated to
 * max_args (non-lambda block semantics), false otherwise. */
static inline bool
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
{
    vm_check_canary(GET_EC(), cfp->sp);
    bool ret = false;

    if (!NIL_P(ary)) {
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
        long len = RARRAY_LEN(ary);
        int argc = calling->argc;

        if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
            /* Avoid SystemStackError when splatting large arrays by storing arguments in
             * a temporary array, instead of trying to keeping arguments on the VM stack.
             */
            VALUE *argv = cfp->sp - argc;
            VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
            rb_ary_cat(argv_ary, argv, argc);
            rb_ary_cat(argv_ary, ptr, len);
            cfp->sp -= argc - 1;
            cfp->sp[-1] = argv_ary;
            calling->argc = 1;
            calling->heap_argv = argv_ary;
            RB_GC_GUARD(ary);
        }
        else {
            long i;

            if (max_args >= 0 && len + argc > max_args) {
                /* If only a given max_args is allowed, copy up to max args.
                 * Used by vm_callee_setup_block_arg for non-lambda blocks,
                 * where additional arguments are ignored.
                 *
                 * Also, copy up to one more argument than the maximum,
                 * in case it is an empty keyword hash that will be removed.
                 */
                calling->argc += len - (max_args - argc + 1);
                len = max_args - argc + 1;
                ret = true;
            }
            else {
                /* Unset heap_argv if set originally. Can happen when
                 * forwarding modified arguments, where heap_argv was used
                 * originally, but heap_argv not supported by the forwarded
                 * method in all cases.
                 */
                calling->heap_argv = 0;
            }
            CHECK_VM_STACK_OVERFLOW(cfp, len);

            /* copy the (possibly truncated) splat elements onto the stack */
            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            calling->argc += i;
        }
    }

    return ret;
}
2836
/* Collapse VM_CALL_KWARG-style keyword arguments (kw_len loose values on
 * the stack) into a single new Hash, leaving that hash as the one trailing
 * argument and setting calling->kw_splat. */
static inline void
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
{
    const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
    const int kw_len = vm_ci_kwarg(ci)->keyword_len;
    const VALUE h = rb_hash_new_with_size(kw_len);
    VALUE *sp = cfp->sp;
    int i;

    for (i=0; i<kw_len; i++) {
        rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
    }
    /* the hash replaces the first keyword-value slot ... */
    (sp-kw_len)[0] = h;

    /* ... and the remaining kw_len-1 slots are popped */
    cfp->sp -= kw_len - 1;
    calling->argc -= kw_len - 1;
    calling->kw_splat = 1;
}
2855
/* Normalize a **kw splat operand: convert non-Hash (but non-nil) values
 * via to_hash and dup, and dup non-empty hashes unless the call site marked
 * the splat as mutable (safe to consume in place). nil passes through. */
static inline VALUE
vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
{
    if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
        if (keyword_hash != Qnil) {
            /* Convert a non-hash keyword splat to a new hash */
            keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
        }
    }
    else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
        /* Convert a hash keyword splat to a new hash unless
         * a mutable keyword splat was passed.
         * Skip allocating new hash for empty keyword splat, as empty
         * keyword splat will be ignored by both callers.
         */
        keyword_hash = rb_hash_dup(keyword_hash);
    }
    return keyword_hash;
}
2875
/* Normalize caller-side arguments on the VM stack before callee setup:
 * expand *splat arrays, materialize/cleanup **kw splats, and convert
 * literal keyword arguments into a hash. Updates cfp->sp, calling->argc,
 * calling->kw_splat and possibly calling->heap_argv in place. */
static inline void
CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                 struct rb_calling_info *restrict calling,
                 const struct rb_callinfo *restrict ci, int max_args)
{
    if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
        if (IS_ARGS_KW_SPLAT(ci)) {
            // f(*a, **kw)
            VM_ASSERT(calling->kw_splat == 1);

            /* pop the splat array and kw hash operands before expansion */
            cfp->sp -= 2;
            calling->argc -= 2;
            VALUE ary = cfp->sp[0];
            VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);

            // splat a
            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;

            // put kw
            if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
                if (UNLIKELY(calling->heap_argv)) {
                    rb_ary_push(calling->heap_argv, kwh);
                    ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
                    if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
                        calling->kw_splat = 0;
                    }
                }
                else {
                    /* push the kw hash back as a trailing positional */
                    cfp->sp[0] = kwh;
                    cfp->sp++;
                    calling->argc++;

                    VM_ASSERT(calling->kw_splat == 1);
                }
            }
            else {
                /* empty/nil kw splat is simply dropped */
                calling->kw_splat = 0;
            }
        }
        else {
            // f(*a)
            VM_ASSERT(calling->kw_splat == 0);

            cfp->sp -= 1;
            calling->argc -= 1;
            VALUE ary = cfp->sp[0];

            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
                goto check_keyword;
            }

            // check the last argument
            VALUE last_hash, argv_ary;
            if (UNLIKELY(argv_ary = calling->heap_argv)) {
                /* a flagged hash at the end of the heap argv becomes kw */
                if (!IS_ARGS_KEYWORD(ci) &&
                    RARRAY_LEN(argv_ary) > 0 &&
                    RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    rb_ary_pop(argv_ary);
                    if (!RHASH_EMPTY_P(last_hash)) {
                        rb_ary_push(argv_ary, rb_hash_dup(last_hash));
                        calling->kw_splat = 1;
                    }
                }
            }
            else {
check_keyword:
                /* same check against the top of the VM stack */
                if (!IS_ARGS_KEYWORD(ci) &&
                    calling->argc > 0 &&
                    RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    if (RHASH_EMPTY_P(last_hash)) {
                        calling->argc--;
                        cfp->sp -= 1;
                    }
                    else {
                        cfp->sp[-1] = rb_hash_dup(last_hash);
                        calling->kw_splat = 1;
                    }
                }
            }
        }
    }
    else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
        // f(**kw)
        VM_ASSERT(calling->kw_splat == 1);
        VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);

        if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
            cfp->sp--;
            calling->argc--;
            calling->kw_splat = 0;
        }
        else {
            cfp->sp[-1] = kwh;
        }
    }
    else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
        // f(k1:1, k2:2)
        VM_ASSERT(calling->kw_splat == 0);

        /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
         * by creating a keyword hash.
         * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
         */
        vm_caller_setup_arg_kw(cfp, calling, ci);
    }
}
2986
2987#define USE_OPT_HIST 0
2988
2989#if USE_OPT_HIST
2990#define OPT_HIST_MAX 64
2991static int opt_hist[OPT_HIST_MAX+1];
2992
__attribute__((destructor))
/* Dump the optional-argument histogram at process exit (USE_OPT_HIST only). */
static void
opt_hist_show_results_at_exit(void)
{
    for (int i=0; i<OPT_HIST_MAX; i++) {
        ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
    }
}
3001#endif
3002
/* Fastpath handler for methods with only optional positional params:
 * look up the entry pc for the number of optionals actually supplied and
 * push a normal frame, shrinking the param area by the unfilled optionals. */
static VALUE
vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                    struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;      /* optionals supplied */
    const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
    const int param = ISEQ_BODY(iseq)->param.size;
    const int local = ISEQ_BODY(iseq)->local_table_size;
    const int delta = opt_num - opt;               /* optionals NOT supplied */

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    /* NOTE(review): the bound checks opt_pc but the index is opt — looks
     * inconsistent; verify against upstream (debug statistics only). */
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
}
3030
/* Tailcall variant of the optional-positional fastpath: compute the entry
 * pc for the supplied optionals and reuse the current frame. */
static VALUE
vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                      struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    /* NOTE(review): the bound checks opt_pc but the index is opt — looks
     * inconsistent; verify against upstream (debug statistics only). */
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
}
3054
3055static void
3056args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3057 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3058 VALUE *const locals);
3059
/* Call handler for `def m(...)` (forwardable) methods: enlarge the frame
 * by the caller's argc and stash the caller's CI as the last local so the
 * callee can forward the arguments verbatim. */
static VALUE
vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                         struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param_size = ISEQ_BODY(iseq)->param.size;
    int local_size = ISEQ_BODY(iseq)->local_table_size;

    // Setting up local size and param size
    VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);

    local_size = local_size + vm_ci_argc(calling->cd->ci);
    param_size = param_size + vm_ci_argc(calling->cd->ci);

    /* the caller's CI rides along on the stack as an extra "argument" */
    cfp->sp[0] = (VALUE)calling->cd->ci;

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
}
3079
/* Fastpath handler: keyword-parameter-only method called with literal
 * keyword arguments (VM_CALL_KWARG). Matches the caller's kw values to
 * the callee's kw locals, then pushes a normal frame. */
static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
    const int ci_kw_len = kw_arg->keyword_len;
    const VALUE * const ci_keywords = kw_arg->keywords;
    VALUE *argv = cfp->sp - calling->argc;
    VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    /* copy the kw values aside: args_setup_kw_parameters rewrites klocals
     * which overlaps the source region */
    VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
    MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
    args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3106
/* Fastpath handler: keyword-parameter-only method called with NO keyword
 * arguments — fill every kw local from its default value and push a frame. */
static VALUE
vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                  struct rb_calling_info *calling)
{
    const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    VALUE * const argv = cfp->sp - calling->argc;
    VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;

    int i;
    for (i=0; i<kw_param->num; i++) {
        klocals[i] = kw_param->default_values[i];
    }
    klocals[i] = INT2FIX(0); // kw specify flag
    // NOTE:
    //   nobody check this value, but it should be cleared because it can
    //   points invalid VALUE (T_NONE objects, raw pointer and so on).

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
3135
3136static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3137
3138static VALUE
3139vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3140 struct rb_calling_info *calling)
3141{
3142 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3143 cfp->sp -= (calling->argc + 1);
3144 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3145 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3146}
3147
3148VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3149
/* Emit a "block may be ignored" warning for a call that passes a block to
 * a method whose iseq never uses one. A per-VM set table deduplicates
 * warnings: by method id in relaxed mode, or by a key mixing the call-site
 * pc with the method definition pointer in strict mode. */
static void
warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
{
    rb_vm_t *vm = GET_VM();
    set_table *dup_check_table = &vm->unused_block_warning_table;
    st_data_t key;
    bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);

    /* byte-wise views of the two pointers used to build the strict key */
    union {
        VALUE v;
        unsigned char b[SIZEOF_VALUE];
    } k1 = {
        .v = (VALUE)pc,
    }, k2 = {
        .v = (VALUE)cme->def,
    };

    // relax check
    if (!strict_unused_block) {
        key = (st_data_t)cme->def->original_id;

        if (set_table_lookup(dup_check_table, key)) {
            return;
        }
    }

    // strict check
    // make unique key from pc and me->def pointer
    key = 0;
    for (int i=0; i<SIZEOF_VALUE; i++) {
        // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
        key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
    }

    /* debugging aid, normally compiled out */
    if (0) {
        fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
        fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
        fprintf(stderr, "key:%p\n", (void *)key);
    }

    // duplication check
    if (set_insert(dup_check_table, key)) {
        // already shown
    }
    else if (RTEST(ruby_verbose) || strict_unused_block) {
        VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
        VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);

        if (!NIL_P(m_loc)) {
            rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
                    name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
        }
        else {
            rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
        }
    }
}
3207
3208static inline int
3209vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3210 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3211{
3212 const struct rb_callinfo *ci = calling->cd->ci;
3213 const struct rb_callcache *cc = calling->cc;
3214
3215 VM_ASSERT((vm_ci_argc(ci), 1));
3216 VM_ASSERT(vm_cc_cme(cc) != NULL);
3217
3218 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3219 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3220 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3221 warn_unused_block(vm_cc_cme(cc), iseq, (void *)CFP_PC(ec->cfp));
3222 }
3223
3224 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3225 if (LIKELY(rb_simple_iseq_p(iseq))) {
3226 rb_control_frame_t *cfp = ec->cfp;
3227 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3228 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3229
3230 if (calling->argc != lead_num) {
3231 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3232 }
3233
3234 //VM_ASSERT(ci == calling->cd->ci);
3235 VM_ASSERT(cc == calling->cc);
3236
3237 if (vm_call_iseq_optimizable_p(ci, cc)) {
3238 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) && ruby_vm_c_events_enabled == 0) {
3239 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3240 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3241 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3242 }
3243 else {
3244 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3245 }
3246 }
3247 return 0;
3248 }
3249 else if (rb_iseq_only_optparam_p(iseq)) {
3250 rb_control_frame_t *cfp = ec->cfp;
3251
3252 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3253 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3254
3255 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3256 const int argc = calling->argc;
3257 const int opt = argc - lead_num;
3258
3259 if (opt < 0 || opt > opt_num) {
3260 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3261 }
3262
3263 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3264 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3265 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3266 vm_call_cacheable(ci, cc));
3267 }
3268 else {
3269 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3270 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3271 vm_call_cacheable(ci, cc));
3272 }
3273
3274 /* initialize opt vars for self-references */
3275 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3276 for (int i=argc; i<lead_num + opt_num; i++) {
3277 argv[i] = Qnil;
3278 }
3279 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3280 }
3281 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3282 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3283 const int argc = calling->argc;
3284 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3285
3286 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3287 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3288
3289 if (argc - kw_arg->keyword_len == lead_num) {
3290 const int ci_kw_len = kw_arg->keyword_len;
3291 const VALUE * const ci_keywords = kw_arg->keywords;
3292 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3293 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3294
3295 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3296 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3297
3298 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3299 vm_call_cacheable(ci, cc));
3300
3301 return 0;
3302 }
3303 }
3304 else if (argc == lead_num) {
3305 /* no kwarg */
3306 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3307 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3308
3309 if (klocals[kw_param->num] == INT2FIX(0)) {
3310 /* copy from default_values */
3311 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3312 vm_call_cacheable(ci, cc));
3313 }
3314
3315 return 0;
3316 }
3317 }
3318 }
3319
3320 // Called iseq is using ... param
3321 // def foo(...) # <- iseq for foo will have "forwardable"
3322 //
3323 // We want to set the `...` local to the caller's CI
3324 // foo(1, 2) # <- the ci for this should end up as `...`
3325 //
3326 // So hopefully the stack looks like:
3327 //
3328 // => 1
3329 // => 2
3330 // => *
3331 // => **
3332 // => &
3333 // => ... # <- points at `foo`s CI
3334 // => cref_or_me
3335 // => specval
3336 // => type
3337 //
3338 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3339 bool can_fastpath = true;
3340
3341 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3342 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3343 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3344 ci = vm_ci_new_runtime(
3345 vm_ci_mid(ci),
3346 vm_ci_flag(ci),
3347 vm_ci_argc(ci),
3348 vm_ci_kwarg(ci));
3349 }
3350 else {
3351 ci = forward_cd->caller_ci;
3352 }
3353 can_fastpath = false;
3354 }
3355 // C functions calling iseqs will stack allocate a CI,
3356 // so we need to convert it to heap allocated
3357 if (!vm_ci_markable(ci)) {
3358 ci = vm_ci_new_runtime(
3359 vm_ci_mid(ci),
3360 vm_ci_flag(ci),
3361 vm_ci_argc(ci),
3362 vm_ci_kwarg(ci));
3363 can_fastpath = false;
3364 }
3365 argv[param_size - 1] = (VALUE)ci;
3366 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3367 return 0;
3368 }
3369
3370 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3371}
3372
// Rewrite the caller's VM stack so a `...`-forwarding call site presents
// plain positional arguments to the callee: the argc values stored in the
// forwarding frame's locals (plus any splat array contents) are copied over
// the CI/splat slots below sp, and sp is adjusted accordingly.
//
// splat: splat array to expand first; only RTEST() is applied, so any falsy
//        sentinel means "no splat" (presumably Qfalse from callers -- confirm).
static void
vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
{
    // This case is when the caller is using a ... parameter.
    // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
    // In this case the caller's caller's CI will be on the stack.
    //
    // For example:
    //
    // def bar(a, b); a + b; end
    // def foo(...); bar(...); end
    // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
    //
    // Stack layout will be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > CI for foo(1, 2), via `getlocal ...`
    // > ( SP points here )
    const VALUE * lep = VM_CF_LEP(cfp);

    const rb_iseq_t *iseq;

    // If we're in an escaped environment (lambda for example), get the iseq
    // from the captured env.
    if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
        rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
        iseq = env->iseq;
    }
    else { // Otherwise use the lep to find the caller
        iseq = CFP_ISEQ(rb_vm_search_cf_from_ep(ec, cfp, lep));
    }

    // Our local storage is below the args we need to copy
    int local_size = ISEQ_BODY(iseq)->local_table_size + argc;

    const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
    VALUE * to = cfp->sp - 1; // clobber the CI

    if (RTEST(splat)) {
        to -= 1; // clobber the splat array
        CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
        MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
        to += RARRAY_LEN(splat);
    }

    CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
    MEMCPY(to, from, VALUE, argc);
    cfp->sp = to + argc;

    // Stack layout should now be:
    //
    // > 1
    // > 2
    // > CI for foo(1, 2)
    // > cref_or_me
    // > specval
    // > type
    // > receiver
    // > 1
    // > 2
    // > ( SP points here )
}
3441
3442static VALUE
3443vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3444{
3445 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3446
3447 const struct rb_callcache *cc = calling->cc;
3448 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3449 int param_size = ISEQ_BODY(iseq)->param.size;
3450 int local_size = ISEQ_BODY(iseq)->local_table_size;
3451
3452 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3453
3454 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3455 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3456}
3457
3458static VALUE
3459vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3460{
3461 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3462
3463 const struct rb_callcache *cc = calling->cc;
3464 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3465 int param_size = ISEQ_BODY(iseq)->param.size;
3466 int local_size = ISEQ_BODY(iseq)->local_table_size;
3467
3468 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3469
3470 // Setting up local size and param size
3471 local_size = local_size + vm_ci_argc(calling->cd->ci);
3472 param_size = param_size + vm_ci_argc(calling->cd->ci);
3473
3474 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3475 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3476}
3477
3478static inline VALUE
3479vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3480 int opt_pc, int param_size, int local_size)
3481{
3482 const struct rb_callinfo *ci = calling->cd->ci;
3483 const struct rb_callcache *cc = calling->cc;
3484
3485 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3486 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3487 }
3488 else {
3489 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3490 }
3491}
3492
3493static inline VALUE
3494vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3495 int opt_pc, int param_size, int local_size)
3496{
3497 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3498 VALUE *argv = cfp->sp - calling->argc;
3499 VALUE *sp = argv + param_size;
3500 cfp->sp = argv - 1 /* recv */;
3501
3502 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3503 calling->block_handler, (VALUE)me,
3504 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3505 local_size - param_size,
3506 ISEQ_BODY(iseq)->stack_max);
3507 return Qundef;
3508}
3509
/* Tailcall: pop the caller's frame and reuse its stack space for the callee.
 * Self and the already-set-up arguments are copied to the old frame's base,
 * then a fresh METHOD frame is pushed (inheriting FINISH if the popped frame
 * had it). */
static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
{
    const struct rb_callcache *cc = calling->cc;
    unsigned int i;
    VALUE *argv = cfp->sp - calling->argc;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    /* The block handler may point into the frame we are about to pop;
     * re-home it into the previous control frame's captured block first. */
    if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
        struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
        const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
        dst_captured->code.val = src_captured->code.val;
        if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
            calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
        }
        else {
            calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
        }
    }

    vm_pop_frame(ec, cfp, cfp->ep);
    cfp = ec->cfp;

    sp_orig = sp = cfp->sp;

    /* push self */
    sp[0] = calling->recv;
    sp++;

    /* copy arguments */
    for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
        *sp++ = src_argv[i];
    }

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
                  calling->recv, calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
                  ISEQ_BODY(iseq)->stack_max);

    cfp->sp = sp_orig;

    return Qundef;
}
3558
3559static void
3560ractor_unsafe_check(void)
3561{
3562 if (!rb_ractor_main_p()) {
3563 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
3564 }
3565}
3566
3567static VALUE
3568call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3569{
3570 ractor_unsafe_check();
3571 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3572 return (*f)(recv, rb_ary_new4(argc, argv));
3573}
3574
3575static VALUE
3576call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3577{
3578 ractor_unsafe_check();
3579 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3580 return (*f)(argc, argv, recv);
3581}
3582
3583static VALUE
3584call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3585{
3586 ractor_unsafe_check();
3587 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3588 return (*f)(recv);
3589}
3590
3591static VALUE
3592call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3593{
3594 ractor_unsafe_check();
3595 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3596 return (*f)(recv, argv[0]);
3597}
3598
3599static VALUE
3600call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3601{
3602 ractor_unsafe_check();
3603 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3604 return (*f)(recv, argv[0], argv[1]);
3605}
3606
3607static VALUE
3608call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3609{
3610 ractor_unsafe_check();
3611 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3612 return (*f)(recv, argv[0], argv[1], argv[2]);
3613}
3614
3615static VALUE
3616call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3617{
3618 ractor_unsafe_check();
3619 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3620 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3621}
3622
3623static VALUE
3624call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3625{
3626 ractor_unsafe_check();
3627 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3628 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3629}
3630
3631static VALUE
3632call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3633{
3634 ractor_unsafe_check();
3636 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3637}
3638
3639static VALUE
3640call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3641{
3642 ractor_unsafe_check();
3644 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3645}
3646
3647static VALUE
3648call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3649{
3650 ractor_unsafe_check();
3652 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3653}
3654
3655static VALUE
3656call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657{
3658 ractor_unsafe_check();
3660 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3661}
3662
3663static VALUE
3664call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3665{
3666 ractor_unsafe_check();
3668 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3669}
3670
3671static VALUE
3672call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3673{
3674 ractor_unsafe_check();
3676 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3677}
3678
3679static VALUE
3680call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3681{
3682 ractor_unsafe_check();
3684 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3685}
3686
3687static VALUE
3688call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3689{
3690 ractor_unsafe_check();
3692 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3693}
3694
3695static VALUE
3696call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3697{
3698 ractor_unsafe_check();
3700 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3701}
3702
3703static VALUE
3704call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3705{
3706 ractor_unsafe_check();
3708 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3709}
3710
3711static VALUE
3712ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3713{
3714 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3715 return (*f)(recv, rb_ary_new4(argc, argv));
3716}
3717
3718static VALUE
3719ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3720{
3721 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3722 return (*f)(argc, argv, recv);
3723}
3724
3725static VALUE
3726ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3727{
3728 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3729 return (*f)(recv);
3730}
3731
3732static VALUE
3733ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3734{
3735 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3736 return (*f)(recv, argv[0]);
3737}
3738
3739static VALUE
3740ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3741{
3742 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3743 return (*f)(recv, argv[0], argv[1]);
3744}
3745
3746static VALUE
3747ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3748{
3749 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3750 return (*f)(recv, argv[0], argv[1], argv[2]);
3751}
3752
3753static VALUE
3754ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3755{
3756 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3757 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3758}
3759
3760static VALUE
3761ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3762{
3763 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3764 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3765}
3766
3767static VALUE
3768ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3769{
3771 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3772}
3773
3774static VALUE
3775ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3776{
3778 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3779}
3780
3781static VALUE
3782ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3783{
3785 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3786}
3787
3788static VALUE
3789ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3790{
3792 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3793}
3794
3795static VALUE
3796ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3797{
3799 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3800}
3801
3802static VALUE
3803ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3804{
3806 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3807}
3808
3809static VALUE
3810ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3811{
3813 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3814}
3815
3816static VALUE
3817ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3818{
3820 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3821}
3822
3823static VALUE
3824ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3825{
3827 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3828}
3829
3830static VALUE
3831ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3832{
3834 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3835}
3836
3837static inline int
3838vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3839{
3840 const int ov_flags = RAISED_STACKOVERFLOW;
3841 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3842 if (rb_ec_raised_p(ec, ov_flags)) {
3843 rb_ec_raised_reset(ec, ov_flags);
3844 return TRUE;
3845 }
3846 return FALSE;
3847}
3848
/* Abort via rb_bug() when the control-frame pointer is inconsistent after a
 * cfunc call; `func` is a string literal naming the caller for the message. */
#define CHECK_CFP_CONSISTENCY(func) \
    (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
     rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3852
/* Return the cfunc descriptor embedded in a method entry. In
 * VM_DEBUG_VERIFY_METHOD_CACHE builds, first assert the entry really is a
 * CFUNC (or NOTIMPLEMENTED) method -- any other type is a cache bug. */
static inline
const rb_method_cfunc_t *
vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
{
#if VM_DEBUG_VERIFY_METHOD_CACHE
    switch (me->def->type) {
      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
        break;
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
        METHOD_BUG(ISEQ);
        METHOD_BUG(ATTRSET);
        METHOD_BUG(IVAR);
        METHOD_BUG(BMETHOD);
        METHOD_BUG(ZSUPER);
        METHOD_BUG(UNDEF);
        METHOD_BUG(OPTIMIZED);
        METHOD_BUG(MISSING);
        METHOD_BUG(REFINED);
        METHOD_BUG(ALIAS);
# undef METHOD_BUG
      default:
        rb_bug("wrong method type: %d", me->def->type);
    }
#endif
    return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
}
3880
/* Core cfunc invocation: push a CFUNC frame, check arity, call the C function
 * through its invoker, verify cfp consistency, pop the frame, and fire
 * c-call/c-return events (and DTrace hooks) around the call.
 * argv points at the argc arguments; stack_bottom is where sp is restored to
 * (the receiver slot, or below heap-argv bookkeeping slots). */
static VALUE
vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                          int argc, VALUE *argv, VALUE *stack_bottom)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    VALUE val;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);

    VALUE recv = calling->recv;
    VALUE block_handler = calling->block_handler;
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;

    if (UNLIKELY(calling->kw_splat)) {
        frame_type |= VM_FRAME_FLAG_CFRAME_KW;
    }

    VM_ASSERT(reg_cfp == ec->cfp);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);

    vm_push_frame(ec, NULL, frame_type, recv,
                  block_handler, (VALUE)me,
                  0, ec->cfp->sp, 0, 0);

    /* Fixed-arity cfuncs (argc >= 0) get a strict arity check; negative
     * arities (-1/-2) accept any argc via their trampolines. */
    int len = cfunc->argc;
    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp = stack_bottom;
    val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);

    CHECK_CFP_CONSISTENCY("vm_call_cfunc");

    rb_vm_pop_frame(ec);

    VM_ASSERT(ec->cfp->sp == stack_bottom);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);

    return val;
}
3926
// Push a C method frame for a given cme. This is called when JIT code skipped
// pushing a frame but the C method reached a point where a frame is needed.
// recv_idx: distance from sp down to the receiver slot (recv = sp[-recv_idx-1]).
void
rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
{
    VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
    rb_execution_context_t *ec = GET_EC();
    VALUE *sp = ec->cfp->sp;
    VALUE recv = *(sp - recv_idx - 1);
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
    VALUE block_handler = VM_BLOCK_HANDLER_NONE;
#if VM_CHECK_MODE > 0
    // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
    *(GET_EC()->cfp->sp) = Qfalse;
#endif
    vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
}
3944
3945// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3946bool
3947rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3948{
3949 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3950}
3951
3952static VALUE
3953vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3954{
3955 int argc = calling->argc;
3956 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3957 VALUE *argv = &stack_bottom[1];
3958
3959 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3960}
3961
/* Generic cfunc call path: normalizes splat/kwsplat arguments, possibly into
 * a hidden heap array (calling->heap_argv), then delegates to
 * vm_call_cfunc_with_frame_. Installs the simple fastpath when the call site
 * never needs CALLER_SETUP_ARG. */
static VALUE
vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    RB_DEBUG_COUNTER_INC(ccf_cfunc_other);

    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    VALUE argv_ary;
    if (UNLIKELY(argv_ary = calling->heap_argv)) { /* assignment intended */
        VM_ASSERT(!IS_ARGS_KEYWORD(ci));
        int argc = RARRAY_LENINT(argv_ary);
        VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
        VALUE *stack_bottom = reg_cfp->sp - 2;

        VM_ASSERT(calling->argc == 1);
        VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
        VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary

        return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
    }
    else {
        CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));

        return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
    }
}
3988
/* Expand a splat array (at sp[-1 - stack_offset]) in place onto the VM stack
 * so the cfunc can be called with plain stack arguments; falls back to the
 * generic path when the expansion would exceed VM_ARGC_STACK_MAX.
 * argc_offset: trailing elements to drop (e.g. an empty ruby2_keywords hash). */
static inline VALUE
vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
{
    VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
    int argc = RARRAY_LENINT(argv_ary) - argc_offset;

    if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
        return vm_call_cfunc_other(ec, reg_cfp, calling);
    }

    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    calling->kw_splat = 0;
    int i;
    /* -2: skip over the receiver and the splat array slot itself. */
    VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
    VALUE *sp = stack_bottom;
    CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
    for(i = 0; i < argc; i++) {
        *++sp = argv[i];
    }
    reg_cfp->sp = sp+1;

    return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
}
4012
/* Fastpath for `f(*a)` into a cfunc: expand the splat array directly onto the
 * stack. A trailing ruby2_keywords-flagged hash is dropped if empty; if it is
 * non-empty we must take the generic path so it is treated as keywords. */
static inline VALUE
vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
    VALUE argv_ary = reg_cfp->sp[-1];
    int argc = RARRAY_LENINT(argv_ary);
    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    VALUE last_hash;
    int argc_offset = 0;

    if (UNLIKELY(argc > 0 &&
                 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
                 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
        if (!RHASH_EMPTY_P(last_hash)) {
            return vm_call_cfunc_other(ec, reg_cfp, calling);
        }
        argc_offset++;
    }
    return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
}
4033
4034static inline VALUE
4035vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4036{
4037 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4038 VALUE keyword_hash = reg_cfp->sp[-1];
4039
4040 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4041 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4042 }
4043
4044 return vm_call_cfunc_other(ec, reg_cfp, calling);
4045}
4046
4047static VALUE
4048vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4049{
4050 const struct rb_callinfo *ci = calling->cd->ci;
4051 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4052
4053 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4054 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4055 // f(*a)
4056 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4057 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4058 }
4059 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4060 // f(*a, **kw)
4061 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4062 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4063 }
4064 }
4065
4066 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4067 return vm_call_cfunc_other(ec, reg_cfp, calling);
4068}
4069
4070static VALUE
4071vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4072{
4073 const struct rb_callcache *cc = calling->cc;
4074 RB_DEBUG_COUNTER_INC(ccf_ivar);
4075 cfp->sp -= 1;
4076 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4077 return ivar;
4078}
4079
/* Attribute-writer implementation: pops value and receiver, checks frozen,
 * then tries the shape-cached vm_setivar fastpath. On Qundef it retries with
 * the class/module or generic-object variant, and finally falls back to the
 * slow path (which also updates the cache). */
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    attr_index_t index;
    shape_id_t dest_shape_id;
    vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
    ID id = vm_cc_cme(cc)->def->body.attr.id;
    rb_check_frozen(obj);
    VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
    if (UNDEF_P(res)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
            break; /* plain objects go straight to the slow path */
          case T_CLASS:
          case T_MODULE:
            {
                res = vm_setivar_class(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
            break;
          default:
            {
                res = vm_setivar_default(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
        }
        res = vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    return res;
}
4117
4118static VALUE
4119vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4120{
4121 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4122}
4123
/* Invoke a bmethod (define_method-backed method) by calling its underlying
 * proc with the already-collected argv. Raises if the proc is unshareable
 * and was defined in a different ractor. */
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
    rb_proc_t *proc;
    VALUE val;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    /* control block frame */
    GetProcPtr(procv, proc);
    val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));

    return val;
}
4144
4145static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4146
/* Call a bmethod whose underlying proc is ISeq-backed: set up arguments
 * against the block's ISeq (simple or complex path), then push a
 * BLOCK|BMETHOD|LAMBDA frame using the proc's captured environment. */
static VALUE
vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);

    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor_id != rb_ec_ractor_id(ec)) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    const struct rb_block *block = &proc->block;

    /* Unwrap nested proc wrappers down to the actual iseq block. */
    while (vm_block_type(block) == block_type_proc) {
        block = vm_proc_block(block->as.proc);
    }
    VM_ASSERT(vm_block_type(block) == block_type_iseq);

    const struct rb_captured_block *captured = &block->as.captured;
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    VALUE * const argv = cfp->sp - calling->argc;
    const int arg_size = ISEQ_BODY(iseq)->param.size;

    int opt_pc;
    if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
        opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
    }
    else {
        opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
    }

    cfp->sp = argv - 1; // -1 for the receiver

    vm_push_frame(ec, iseq,
                  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
                  calling->recv,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)cme,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  argv + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size,
                  ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
4197
/* Invoke a bmethod whose underlying block is NOT an ISeq block (e.g. an
 * ifunc/symbol proc). Arguments are gathered into a contiguous argv and
 * handed to vm_call_bmethod_body. */
static VALUE
vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);

    VALUE *argv;
    int argc;
    CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(calling->heap_argv)) {
        /* Arguments were spilled to a heap array; use its buffer directly.
         * sp -= 2 pops the receiver and the splat array off the VM stack. */
        argv = RARRAY_PTR(calling->heap_argv);
        cfp->sp -= 2;
    }
    else {
        /* Copy arguments off the VM stack into a C stack buffer, then pop
         * them together with the receiver (hence "- argc - 1"). */
        argc = calling->argc;
        argv = ALLOCA_N(VALUE, argc);
        MEMCPY(argv, cfp->sp - argc, VALUE, argc);
        cfp->sp += - argc - 1;
    }

    return vm_call_bmethod_body(ec, calling, argv);
}
4219
4220static VALUE
4221vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4222{
4223 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4224
4225 const struct rb_callcache *cc = calling->cc;
4226 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4227 VALUE procv = cme->def->body.bmethod.proc;
4228 rb_proc_t *proc;
4229 GetProcPtr(procv, proc);
4230 const struct rb_block *block = &proc->block;
4231
4232 while (vm_block_type(block) == block_type_proc) {
4233 block = vm_proc_block(block->as.proc);
4234 }
4235 if (vm_block_type(block) == block_type_iseq) {
4236 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4237 return vm_call_iseq_bmethod(ec, cfp, calling);
4238 }
4239
4240 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4241 return vm_call_noniseq_bmethod(ec, cfp, calling);
4242}
4243
4244VALUE
4245rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4246{
4247 VALUE klass = current_class;
4248
4249 /* for prepended Module, then start from cover class */
4250 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4251 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4252 klass = RBASIC_CLASS(klass);
4253 }
4254
4255 while (RTEST(klass)) {
4256 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4257 if (owner == target_owner) {
4258 return klass;
4259 }
4260 klass = RCLASS_SUPER(klass);
4261 }
4262
4263 return current_class; /* maybe module function */
4264}
4265
4266static const rb_callable_method_entry_t *
4267aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4268{
4269 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4270 const rb_callable_method_entry_t *cme;
4271
4272 if (orig_me->defined_class == 0) {
4273 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4274 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4275 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4276
4277 if (me->def->reference_count == 1) {
4278 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4279 }
4280 else {
4282 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4283 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4284 }
4285 }
4286 else {
4287 cme = (const rb_callable_method_entry_t *)orig_me;
4288 }
4289
4290 VM_ASSERT(callable_method_entry_p(cme));
4291 return cme;
4292}
4293
4295rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4296{
4297 return aliased_callable_method_entry(me);
4298}
4299
/* Call an aliased method: resolve the alias to its original callable
 * entry, swap in a stack-allocated call cache holding it, and re-dispatch
 * by method type. */
static VALUE
vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    calling->cc = &VM_CC_ON_STACK(Qundef,
                                  vm_call_general,
                                  {{0}},
                                  aliased_callable_method_entry(vm_cc_cme(calling->cc)));

    return vm_call_method_each_type(ec, cfp, calling);
}
4310
4311static enum method_missing_reason
4312ci_missing_reason(const struct rb_callinfo *ci)
4313{
4314 enum method_missing_reason stat = MISSING_NOENTRY;
4315 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4316 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4317 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4318 return stat;
4319}
4320
4321static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4322
/* Core of Object#send/__send__-style dispatch by symbol: convert the
 * symbol to an ID, fall back to method_missing when the symbol has no
 * corresponding ID (avoiding inadvertent symbol creation), then dispatch
 * with a freshly built on-stack call info/cache. */
static VALUE
vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
               struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
{
    ASSUME(calling->argc >= 0);

    enum method_missing_reason missing_reason = MISSING_NOENTRY;
    int argc = calling->argc;
    VALUE recv = calling->recv;
    VALUE klass = CLASS_OF(recv);
    /* rb_check_id returns 0 when the symbol/string has no registered ID. */
    ID mid = rb_check_id(&symbol);
    flags |= VM_CALL_OPT_SEND;

    if (UNLIKELY(! mid)) {
        /* No such ID: route through method_missing, with the (interned)
         * method name inserted as the first argument. */
        mid = idMethodMissing;
        missing_reason = ci_missing_reason(ci);
        ec->method_missing_reason = missing_reason;

        VALUE argv_ary;
        if (UNLIKELY(argv_ary = calling->heap_argv)) {
            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                rb_ary_unshift(argv_ary, symbol);

                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);

                rb_exc_raise(exc);
            }
            rb_ary_unshift(argv_ary, rb_str_intern(symbol));
        }
        else {
            /* E.g. when argc == 2
             *
             * |      |          |      |  TOPN
             * |      |    +------+
             * |      |    +---> | arg1 |    0
             * +------+    |     +------+
             * | arg1 | -+ +-> | arg0 |    1
             * +------+  |     +------+
             * | arg0 | ---+   | sym  |    2
             * +------+        +------+
             * | recv |        | recv |    3
             * --+------+--------+------+------
             */
            /* Shift the arguments up by one slot to make room for the
             * method-name symbol at the bottom of the argument list. */
            int i = argc;
            CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
            INC_SP(1);
            MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
            argc = ++calling->argc;

            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                TOPN(i) = symbol;
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, argc, argv, priv);

                rb_exc_raise(exc);
            }
            else {
                TOPN(i) = rb_str_intern(symbol);
            }
        }
    }

    /* Build a fresh call data on the stack for the resolved mid. */
    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        /* Forwarding call (`...`): carry the caller's call info along. */
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }
    calling->cc = &VM_CC_ON_STACK(klass,
                                  vm_call_general,
                                  { .method_missing_reason = missing_reason },
                                  rb_callable_method_entry_with_refinements(klass, mid, NULL));

    if (flags & VM_CALL_FCALL) {
        return vm_call_method(ec, reg_cfp, calling);
    }

    const struct rb_callcache *cc = calling->cc;
    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    /* Non-FCALL path: enforce visibility here, mirroring vm_call_method. */
    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, reg_cfp, calling);
          case METHOD_VISI_PRIVATE:
            vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
            break;
          case METHOD_VISI_PROTECTED:
            vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
            break;
          default:
            VM_UNREACHABLE(vm_call_method);
        }
        return vm_call_method_missing(ec, reg_cfp, calling);
    }

    return vm_call_method_nome(ec, reg_cfp, calling);
}
4438
/* Common tail of the optimized #send implementations: pop the method-name
 * symbol out of the argument list, shift the remaining arguments down one
 * slot, and dispatch via vm_call_symbol. */
static VALUE
vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    int i;
    VALUE sym;

    i = calling->argc - 1;

    if (calling->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    /* The method-name symbol is the deepest argument on the stack. */
    sym = TOPN(i);
    /* E.g. when i == 2
     *
     * |      |          |      |  TOPN
     * +------+          |      |
     * | arg1 | ---+     |      |    0
     * +------+    |     +------+
     * | arg0 | -+ +-> | arg1 |    1
     * +------+  |     +------+
     * | sym  |  +---> | arg0 |    2
     * +------+        +------+
     * | recv |        | recv |    3
     * --+------+--------+------+------
     */
    /* shift arguments */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    calling->argc -= 1;
    DEC_SP(1);

    return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
}
4475
/* #send with complex argument handling (splats, keyword splats, kwargs):
 * normalize the arguments (possibly spilling to a heap array) before
 * delegating to the symbol-dispatch path. */
static VALUE
vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = VM_CALL_FCALL;
    VALUE sym;

    VALUE argv_ary;
    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        /* Heap argv: the method name is its first element. */
        sym = rb_ary_shift(argv_ary);
        flags |= VM_CALL_ARGS_SPLAT;
        if (calling->kw_splat) {
            /* Tag the trailing hash so the callee treats it as keywords;
             * kw_splat is cleared because the flag now travels on the hash. */
            VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
            ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
            calling->kw_splat = 0;
        }
        return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
    }

    if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
    return vm_call_opt_send0(ec, reg_cfp, calling, flags);
}
4500
4501static VALUE
4502vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4503{
4504 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4505 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4506}
4507
/* Entry point for optimized #send: choose the complex or simple argument
 * path based on the call-site flags, and cache that choice as the
 * fastpath for subsequent calls through this call cache. */
static VALUE
vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send);

    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = vm_ci_flag(ci);

    /* The complex path is needed for forwarding calls, or for non-simple
     * calls where the splat/kw-splat/kwarg arguments could fully account
     * for argc (meaning no explicit method-name argument on the stack). */
    if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
                                                  ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                                                   (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                                                   ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
        CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
        return vm_call_opt_send_complex(ec, reg_cfp, calling);
    }

    CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
    return vm_call_opt_send_simple(ec, reg_cfp, calling);
}
4527
/* Redirect a call to receiver.method_missing(:mid, args...): insert the
 * original method name as the first argument, rebuild call info/cache for
 * idMethodMissing, and dispatch again. */
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                            const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
{
    RB_DEBUG_COUNTER_INC(ccf_method_missing);

    VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
    unsigned int argc, flag;

    flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
    argc = ++calling->argc;

    /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    vm_check_canary(ec, reg_cfp->sp);
    if (argc > 1) {
        MEMMOVE(argv+1, argv, VALUE, argc-1);
    }
    argv[0] = ID2SYM(vm_ci_mid(orig_ci));
    INC_SP(1);

    /* Record why method_missing was triggered (read by the callee via
     * Kernel#method_missing's default implementation). */
    ec->method_missing_reason = reason;

    struct rb_forwarding_call_data new_fcd = {
        .cd = {
            .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
            .cc = NULL,
        },
        .caller_ci = NULL,
    };

    if (!(flag & VM_CALL_FORWARDING)) {
        calling->cd = &new_fcd.cd;
    }
    else {
        /* Forwarding call: preserve the caller's call info for `...`. */
        const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
        VM_ASSERT((vm_ci_argc(caller_ci), 1));
        new_fcd.caller_ci = caller_ci;
        calling->cd = (struct rb_call_data *)&new_fcd;
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
    return vm_call_method(ec, reg_cfp, calling);
}
4573
4574static VALUE
4575vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4576{
4577 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4578}
4579
4580static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
/* Handle a zsuper (argument-forwarding `super`) definition: look the
 * method up again starting from the superclass of `klass` and dispatch to
 * whatever is found there. */
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
{
    klass = RCLASS_SUPER(klass);

    const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
    if (cme == NULL) {
        return vm_call_method_nome(ec, cfp, calling);
    }
    /* Skip over a refinement wrapper to the original method, if any. */
    if (cme->def->type == VM_METHOD_TYPE_REFINED &&
        cme->def->body.refined.orig_me) {
        cme = refined_method_callable_without_refinement(cme);
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

    return vm_call_method_each_type(ec, cfp, calling);
}
4599
4600static inline VALUE
4601find_refinement(VALUE refinements, VALUE klass)
4602{
4603 if (NIL_P(refinements)) {
4604 return Qnil;
4605 }
4606 return rb_hash_lookup(refinements, klass);
4607}
4608
PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
/* From a (possibly block) frame, walk back to the control frame of the
 * method that lexically encloses it. Returns the starting frame itself
 * when the enclosing method frame cannot be found (orphan block). */
static rb_control_frame_t *
current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (CFP_ISEQ(cfp) && ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_BLOCK) {
        /* Blocks share their method's local iseq; scan outward until we
         * reach the frame running that local iseq. */
        const rb_iseq_t *local_iseq = ISEQ_BODY(CFP_ISEQ(cfp))->local_iseq;

        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
                /* TODO: orphan block */
                return top_cfp;
            }
        } while (CFP_ISEQ(cfp) != local_iseq);
    }
    return cfp;
}
4628
4629static const rb_callable_method_entry_t *
4630refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4631{
4632 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4633 const rb_callable_method_entry_t *cme;
4634
4635 if (orig_me->defined_class == 0) {
4636 cme = NULL;
4638 }
4639 else {
4640 cme = (const rb_callable_method_entry_t *)orig_me;
4641 }
4642
4643 VM_ASSERT(callable_method_entry_p(cme));
4644
4645 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4646 cme = NULL;
4647 }
4648
4649 return cme;
4650}
4651
/* Resolve a VM_METHOD_TYPE_REFINED entry to the method that should
 * actually run: scan the cref chain for active refinements of the owner,
 * preferring a refinement's method; otherwise fall back to the original
 * method or a superclass lookup. May return NULL (no such method). */
static const rb_callable_method_entry_t *
search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    ID mid = vm_ci_mid(calling->cd->ci);
    const rb_cref_t *cref = vm_get_cref(cfp->ep);
    const struct rb_callcache * const cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

    for (; cref; cref = CREF_NEXT(cref)) {
        const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
        if (NIL_P(refinement)) continue;

        const rb_callable_method_entry_t *const ref_me =
            rb_callable_method_entry(refinement, mid);

        if (ref_me) {
            if (vm_cc_call(cc) == vm_call_super_method) {
                /* `super` inside the refined method itself must not find
                 * the same refinement again, or it would recurse. */
                const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                    continue;
                }
            }

            if (cme->def->type != VM_METHOD_TYPE_REFINED ||
                cme->def != ref_me->def) {
                cme = ref_me;
            }
            /* A non-refined entry found in the refinement is the answer;
             * otherwise keep scanning outer crefs. */
            if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
                return cme;
            }
        }
        else {
            return NULL;
        }
    }

    /* No active refinement supplied the method: use the original method
     * if one exists, otherwise continue lookup from the superclass. */
    if (vm_cc_cme(cc)->def->body.refined.orig_me) {
        return refined_method_callable_without_refinement(vm_cc_cme(cc));
    }
    else {
        VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
        const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
        return cme;
    }
}
4698
/* Call a refined method: resolve the actual target via
 * search_refined_method, install it in a new (heap or stack) call cache,
 * and dispatch; no method at all routes to the no-method path. */
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);

    if (ref_cme) {
        if (calling->cd->cc) {
            /* A real (markable) call cache exists: replace it with a new
             * refinement cache and write-barrier it into the iseq. */
            const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
            RB_OBJ_WRITE(CFP_ISEQ(cfp), &calling->cd->cc, cc);
            return vm_call_method(ec, cfp, calling);
        }
        else {
            /* No persistent cache (e.g. one-shot call data): use an
             * on-stack cache instead. */
            struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
            calling->cc= ref_cc;
            return vm_call_method(ec, cfp, calling);
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
4720
4721static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4722
4723NOINLINE(static VALUE
4724 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4725 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4726
/* Shared body for Proc#call-style optimized calls: drop the receiver
 * (the Proc/block itself) from the stack and invoke the block with the
 * remaining arguments. */
static VALUE
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                         struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
{
    int argc = calling->argc;

    /* remove self */
    if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
    DEC_SP(1);

    return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
}
4739
4740static VALUE
4741vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4742{
4743 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4744
4745 const struct rb_callinfo *ci = calling->cd->ci;
4746 VALUE procval = calling->recv;
4747 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4748}
4749
/* Optimized call of the method's block parameter (e.g. `block.call`):
 * when Proc#call is unredefined, invoke the passed block handler
 * directly; otherwise materialize the Proc and go through full dispatch. */
static VALUE
vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_block_call);

    VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
    const struct rb_callinfo *ci = calling->cd->ci;

    if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
        return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
    }
    else {
        /* Proc#call was redefined: fall back to an ordinary method call
         * on the materialized Proc object. */
        calling->recv = rb_vm_bh_to_procval(ec, block_handler);
        calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
        return vm_call_general(ec, reg_cfp, calling);
    }
}
4767
/* Read a Struct member for an optimized struct reader method; the member
 * index is stored in the optimized method definition. */
static VALUE
vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
{
    VALUE recv = calling->recv;

    VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
    VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
    VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);

    const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
    return RSTRUCT_GET_RAW(recv, off);
}
4780
4781static VALUE
4782vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4783{
4784 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4785
4786 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4787 reg_cfp->sp -= 1;
4788 return ret;
4789}
4790
/* Write a Struct member for an optimized struct writer method; rejects
 * frozen receivers, returns the assigned value. */
static VALUE
vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
{
    VALUE recv = calling->recv;

    VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
    VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
    VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);

    rb_check_frozen(recv);

    const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
    RSTRUCT_SET_RAW(recv, off, val);

    return val;
}
4807
/* Call-cache handler for optimized Struct writers: the value is on top of
 * the stack; write it, then pop value and receiver (sp -= 2). */
static VALUE
vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);

    VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
    reg_cfp->sp -= 2;
    return ret;
}
4817
4818NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4819 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4820
/* Evaluate `func` (an attr-style call) into `var`, wrapping it with
 * c-call/c-return event hooks when any C-level tracing events are
 * enabled; otherwise run `nohook` first (typically call-cache setup) and
 * evaluate `func` without hook overhead. */
#define VM_CALL_METHOD_ATTR(var, func, nohook) \
    if (UNLIKELY(ruby_vm_c_events_enabled > 0)) { \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
        var = func; \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
    } \
    else { \
        nohook; \
        var = func; \
    }
4833
/* Dispatch a VM_METHOD_TYPE_OPTIMIZED method to its specialized handler
 * (send / Proc#call / block call / Struct accessor), caching the handler
 * as the fastpath where applicable. */
static VALUE
vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                  const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    switch (vm_cc_cme(cc)->def->body.optimized.type) {
      case OPTIMIZED_METHOD_TYPE_SEND:
        CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
        return vm_call_opt_send(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
        return vm_call_opt_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
        return vm_call_opt_block_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);

        /* The fastpath is only installed for simple call sites; the
         * tracing-enabled branch of VM_CALL_METHOD_ATTR skips it. */
        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aref(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
        CALLER_SETUP_ARG(cfp, calling, ci, 1);
        rb_check_arity(calling->argc, 1, 1);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aset(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      default:
        rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
    }
}
4874
/* Dispatch a call according to the method entry's definition type,
 * installing a type-specific fastpath in the call cache where safe.
 * This is the second stage of dispatch, after visibility checks. */
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE v;

    VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));

    switch (cme->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        /* Forwardable (`...`) parameter lists need a dedicated setup path. */
        if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
            CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
            return vm_call_iseq_fwd_setup(ec, cfp, calling);
        }
        else {
            CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
            return vm_call_iseq_setup(ec, cfp, calling);
        }

      case VM_METHOD_TYPE_NOTIMPLEMENTED:
      case VM_METHOD_TYPE_CFUNC:
        CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
        return vm_call_cfunc(ec, cfp, calling);

      case VM_METHOD_TYPE_ATTRSET:
        CALLER_SETUP_ARG(cfp, calling, ci, 1);

        rb_check_arity(calling->argc, 1, 1);

        /* Flags under which the attrset fastpath must NOT be installed. */
        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);

        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        else {
            /* Unmarkable cache: build a temporary on-stack copy that can
             * hold the attr index without GC visibility. */
            cc = &((struct rb_callcache) {
                .flags = T_IMEMO |
                    (imemo_callcache << FL_USHIFT) |
                    VM_CALLCACHE_UNMARKABLE |
                    VM_CALLCACHE_ON_STACK,
                .klass = cc->klass,
                .cme_  = cc->cme_,
                .call_ = cc->call_,
                .aux_  = {
                    .attr = {
                        .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
                    }
                },
            });

            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        return v;

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
        const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
        VM_CALL_METHOD_ATTR(v,
                            vm_call_ivar(ec, cfp, calling),
                            CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
        return v;

      case VM_METHOD_TYPE_MISSING:
        vm_cc_method_missing_reason_set(cc, 0);
        CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
        return vm_call_method_missing(ec, cfp, calling);

      case VM_METHOD_TYPE_BMETHOD:
        CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
        return vm_call_bmethod(ec, cfp, calling);

      case VM_METHOD_TYPE_ALIAS:
        CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
        return vm_call_alias(ec, cfp, calling);

      case VM_METHOD_TYPE_OPTIMIZED:
        return vm_call_optimized(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_UNDEF:
        break;

      case VM_METHOD_TYPE_ZSUPER:
        return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));

      case VM_METHOD_TYPE_REFINED:
        // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
        // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
        return vm_call_refined(ec, cfp, calling);
    }

    rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
}
4976
4977NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4978
/* Handle dispatch when no method entry was found. Normally redirects to
 * method_missing; when the missing method IS method_missing itself, raise
 * NoMethodError directly to avoid infinite recursion (both raise paths
 * are NORETURN, so the function does not fall through). */
static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    /* method missing */
    const struct rb_callinfo *ci = calling->cd->ci;
    const int stat = ci_missing_reason(ci);

    if (vm_ci_mid(ci) == idMethodMissing) {
        if (UNLIKELY(calling->heap_argv)) {
            vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
        }
        else {
            /* STACK_ADDR_FROM_TOP expects a local named reg_cfp. */
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
            vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
        }
    }
    else {
        return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
    }
}
5000
5001/* Protected method calls and super invocations need to check that the receiver
5002 * (self for super) inherits the module on which the method is defined.
5003 * In the case of refinements, it should consider the original class not the
5004 * refinement.
5005 */
5006static VALUE
5007vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
5008{
5009 VALUE defined_class = me->defined_class;
5010 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5011 return NIL_P(refined_class) ? defined_class : refined_class;
5012}
5013
/* First stage of method dispatch: enforce visibility (public / private /
 * protected) against the call-site flags, routing violations to
 * method_missing, then hand off to the per-type dispatcher. */
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PRIVATE:
            /* Private methods are only callable without an explicit
             * receiver (FCALL). */
            if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
                enum method_missing_reason stat = MISSING_PRIVATE;
                if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;

                vm_cc_method_missing_reason_set(cc, stat);
                CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
                return vm_call_method_missing(ec, cfp, calling);
            }
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PROTECTED:
            /* Protected methods require `self` to be a kind of the
             * defining class (refinement-aware). */
            if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
                VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
                    return vm_call_method_missing(ec, cfp, calling);
                }
                else {
                    /* caching method info to dummy cc */
                    VM_ASSERT(vm_cc_cme(cc) != NULL);
                    struct rb_callcache cc_on_stack = *cc;
                    FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
                    calling->cc = &cc_on_stack;
                    return vm_call_method_each_type(ec, cfp, calling);
                }
            }
            return vm_call_method_each_type(ec, cfp, calling);

          default:
            rb_bug("unreachable");
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
5064
/* Generic (uncached/slow-path) call handler: full visibility check plus
 * per-type dispatch. Used as the initial cc->call_ before a fastpath is
 * installed. */
static VALUE
vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_general);
    return vm_call_method(ec, reg_cfp, calling);
}
5071
/* Reset a call cache's handler back to the generic slow path (e.g. after
 * an invalidation). The cast strips the const qualifier to perform the
 * in-place write on the otherwise-const cache. */
void
rb_vm_cc_general(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());

    *(vm_call_handler *)&cc->call_ = vm_call_general;
}
5080
/* Call handler used by invokesuper; behaviorally identical to
 * vm_call_general, but must remain a distinct function because its
 * address is compared in search_refined_method. */
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_super_method);

    // This line is introduced to make different from `vm_call_general` because some compilers (VC we found)
    // can merge the function and the address of the function becomes same.
    // The address of `vm_call_super_method` is used in `search_refined_method`, so it should be different.
    if (ec == NULL) rb_bug("unreachable");

    /* this check is required to distinguish with other functions. */
    VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
    return vm_call_method(ec, reg_cfp, calling);
}
5095
5096/* super */
5097
/* Compute where `super` starts its method lookup: skip a refinement
 * iclass back to the refinement module, normalize through the origin
 * (for prepended modules), and return the superclass. */
static inline VALUE
vm_search_normal_superclass(VALUE klass)
{
    if (BUILTIN_TYPE(klass) == T_ICLASS &&
        RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
        FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
        klass = RBASIC(klass)->klass;
    }
    klass = RCLASS_ORIGIN(klass);
    return RCLASS_SUPER(klass);
}
5109
5110NORETURN(static void vm_super_outside(void));
5111
/* Raise the error for `super` used outside of any method body. */
static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
5117
/* Shared (non-GC-markable) empty call cache used when `super` resolves to
 * no cached method entry. */
static const struct rb_callcache *
empty_cc_for_super(void)
{
    return &vm_empty_cc_for_super;
}
5123
/* Resolve the call cache for an invokesuper call site.
 *
 * Validates that `super` is legal here (raises NoMethodError/TypeError/
 * RuntimeError otherwise), rewrites the call info's method id to the
 * current method's original id, then searches the normal superclass chain
 * and installs/invalidate call caches as needed.  May write into `cd`
 * (both ci and cc) with the proper GC write barriers. */
static const struct rb_callcache *
vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
{
    VALUE current_defined_class;
    const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);

    if (!me) {
        vm_super_outside();
    }

    current_defined_class = vm_defined_class_for_protected_call(me);

    /* reject `super` when self is not an instance of the defining class
     * (e.g. a method object rebound via UnboundMethod#bind). */
    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        iseq != method_entry_iseqptr(me) &&
        !rb_obj_is_kind_of(recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RCLASS_INCLUDER(current_defined_class) : current_defined_class;

        if (m) { /* not bound UnboundMethod */
            rb_raise(rb_eTypeError,
                     "self has wrong type to call super in this context: "
                     "%"PRIsVALUE" (expected %"PRIsVALUE")",
                     rb_obj_class(recv), m);
        }
    }

    /* zsuper cannot see the implicit arguments of a define_method body */
    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }

    ID mid = me->def->original_id;

    /* super dispatches on the current method's original id, not the id at
     * the call site; patch the call info accordingly. */
    if (!vm_ci_markable(cd->ci)) {
        VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
    }
    else {
        // update iseq. really? (TODO)
        cd->ci = vm_ci_new_runtime(mid,
                                   vm_ci_flag(cd->ci),
                                   vm_ci_argc(cd->ci),
                                   vm_ci_kwarg(cd->ci));

        RB_OBJ_WRITTEN(iseq, Qundef, cd->ci);
    }

    const struct rb_callcache *cc;

    VALUE klass = vm_search_normal_superclass(me->defined_class);

    if (!klass) {
        /* bound instance method of module */
        cc = vm_cc_new(Qundef, NULL, vm_call_method_missing, cc_type_super);
        RB_OBJ_WRITE(iseq, &cd->cc, cc);
    }
    else {
        cc = vm_search_method_fastpath(reg_cfp, cd, klass);
        const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);

        // define_method can cache for different method id
        if (cached_cme == NULL) {
            // empty_cc_for_super is not markable object
            cd->cc = empty_cc_for_super();
        }
        else if (cached_cme->called_id != mid) {
            const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
            if (cme) {
                cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
                RB_OBJ_WRITE(iseq, &cd->cc, cc);
            }
            else {
                cd->cc = cc = empty_cc_for_super();
            }
        }
        else {
            switch (cached_cme->def->type) {
              // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
              case VM_METHOD_TYPE_REFINED:
              // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
              case VM_METHOD_TYPE_ATTRSET:
              case VM_METHOD_TYPE_IVAR:
                vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
                break;
              default:
                break; // use fastpath
            }
        }
    }

    VM_ASSERT((vm_cc_cme(cc), true));

    return cc;
}
5220
5221/* yield */
5222
5223static inline int
5224block_proc_is_lambda(const VALUE procval)
5225{
5226 rb_proc_t *proc;
5227
5228 if (procval) {
5229 GetProcPtr(procval, proc);
5230 return proc->is_lambda;
5231 }
5232 else {
5233 return 0;
5234 }
5235}
5236
5237static VALUE
5238vm_yield_with_cfunc(rb_execution_context_t *ec,
5239 const struct rb_captured_block *captured,
5240 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5242{
5243 int is_lambda = FALSE; /* TODO */
5244 VALUE val, arg, blockarg;
5245 int frame_flag;
5246 const struct vm_ifunc *ifunc = captured->code.ifunc;
5247
5248 if (is_lambda) {
5249 arg = rb_ary_new4(argc, argv);
5250 }
5251 else if (argc == 0) {
5252 arg = Qnil;
5253 }
5254 else {
5255 arg = argv[0];
5256 }
5257
5258 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5259
5260 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5261 if (kw_splat) {
5262 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5263 }
5264
5265 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5266 frame_flag,
5267 self,
5268 VM_GUARDED_PREV_EP(captured->ep),
5269 (VALUE)me,
5270 0, ec->cfp->sp, 0, 0);
5271 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5272 rb_vm_pop_frame(ec);
5273
5274 return val;
5275}
5276
/* Public wrapper: yield to an ifunc block with no keywords, no block
 * argument, and no bmethod method entry (me == NULL). */
VALUE
rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
{
    return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
}
5282
/* Yield to a Symbol block handler (&:sym) by calling the symbol's proc
 * semantics: argv[0] becomes the receiver of `symbol`'s method. */
static VALUE
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
{
    return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
}
5288
5289static inline int
5290vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5291{
5292 int i;
5293 long len = RARRAY_LEN(ary);
5294
5295 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5296
5297 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5298 argv[i] = RARRAY_AREF(ary, i);
5299 }
5300
5301 return i;
5302}
5303
/* Return argv[0] converted to an Array via to_ary if possible, else nil.
 * rb_check_array_type must not disturb argv[0]; the disabled branch kept
 * the old restore-write, the live branch asserts it is unnecessary. */
static inline VALUE
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
{
    VALUE ary, arg0 = argv[0];
    ary = rb_check_array_type(arg0);
#if 0
    argv[0] = arg0;
#else
    VM_ASSERT(argv[0] == arg0);
#endif
    return ary;
}
5316
/* Arrange block arguments in `argv` to match the block iseq's parameters.
 *
 * For a "simple" iseq (only lead parameters), applies Proc-style argument
 * semantics when invoked as a block (arg_setup_block): a lone array
 * argument is splatted, missing arguments become nil, and extra arguments
 * are truncated.  Lambda-style invocation (arg_setup_method) instead
 * raises ArgumentError on arity mismatch.  Complex parameter lists are
 * delegated to setup_parameters_complex.
 *
 * Returns the opt-table pc offset to start execution at (0 for simple). */
static int
vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
    if (rb_simple_iseq_p(iseq)) {
        rb_control_frame_t *cfp = ec->cfp;
        VALUE arg0;

        CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);

        /* single array argument to a multi-parameter block: splat it */
        if (arg_setup_type == arg_setup_block &&
            calling->argc == 1 &&
            ISEQ_BODY(iseq)->param.flags.has_lead &&
            !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
            !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
            calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
        }

        if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
            if (arg_setup_type == arg_setup_block) {
                if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
                    int i;
                    CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
                    for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
                }
                else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
                }
            }
            else {
                argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
            }
        }

        return 0;
    }
    else {
        return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
    }
}
5357
/* Build a temporary calling-info/call-info pair on the C stack and run
 * block argument setup.  Note: only the fields read by
 * vm_callee_setup_block_arg are initialized; in particular `cd` is left
 * unset on purpose. */
static int
vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
    struct rb_calling_info calling_entry, *calling;

    calling = &calling_entry;
    calling->argc = argc;
    calling->block_handler = block_handler;
    calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
    calling->recv = Qundef;
    calling->heap_argv = 0;
    calling->cc = NULL;
    struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);

    return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
}
5374
5375/* ruby iseq -> ruby block */
5376
/* Invoke an iseq-backed block: set up arguments in place on the VM stack,
 * then push a BLOCK frame.  Returns Qundef to signal the interpreter loop
 * that a new frame was pushed (rather than a finished value). */
static VALUE
vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    const int arg_size = ISEQ_BODY(iseq)->param.size;
    /* arguments currently sit on top of the stack; they become the new
     * frame's locals starting at rsp */
    VALUE * const rsp = GET_SP() - calling->argc;
    VALUE * const argv = rsp;
    int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
    int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);

    SET_SP(rsp);

    vm_push_frame(ec, iseq,
                  frame_flag,
                  captured->self,
                  VM_GUARDED_PREV_EP(captured->ep), 0,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  rsp + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
5402
/* Invoke a Symbol block handler (&:sym): the first argument becomes the
 * receiver and the symbol's method is called on it with the rest.
 * Non-simple argument forms (splat/kwsplat/kwargs) are normalized first,
 * possibly via a heap argv, which requires patching the stack so cfp
 * consistency checks still pass. */
static VALUE
vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                       struct rb_calling_info *calling, const struct rb_callinfo *ci,
                       MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
    int flags = vm_ci_flag(ci);

    if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 0) ||
                  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
        CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
        flags = 0;
        if (UNLIKELY(calling->heap_argv)) {
#if VM_ARGC_STACK_MAX < 0
            if (RARRAY_LEN(calling->heap_argv) < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
#endif
            calling->recv = rb_ary_shift(calling->heap_argv);
            // Modify stack to avoid cfp consistency error
            reg_cfp->sp++;
            reg_cfp->sp[-1] = reg_cfp->sp[-2];
            reg_cfp->sp[-2] = calling->recv;
            flags |= VM_CALL_ARGS_SPLAT;
        }
        else {
            if (calling->argc < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
            calling->recv = TOPN(--calling->argc);
        }
        if (calling->kw_splat) {
            flags |= VM_CALL_KW_SPLAT;
        }
    }
    else {
        /* simple case: pop the receiver off the stack directly */
        if (calling->argc < 1) {
            rb_raise(rb_eArgError, "no receiver given");
        }
        calling->recv = TOPN(--calling->argc);
    }

    return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
}
5450
/* Invoke an ifunc (C function) block handler.  Arguments may have been
 * moved to a heap argv by CALLER_SETUP_ARG; otherwise they are read from
 * the top of the VM stack and popped afterwards. */
static VALUE
vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                      struct rb_calling_info *calling, const struct rb_callinfo *ci,
                      MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE val;
    int argc;
    const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
    CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    argc = calling->argc;
    val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
    POPN(argc); /* TODO: should put before C/yield? */
    return val;
}
5465
5466static VALUE
5467vm_proc_to_block_handler(VALUE procval)
5468{
5469 const struct rb_block *block = vm_proc_block(procval);
5470
5471 switch (vm_block_type(block)) {
5472 case block_type_iseq:
5473 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5474 case block_type_ifunc:
5475 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5476 case block_type_symbol:
5477 return VM_BH_FROM_SYMBOL(block->as.symbol);
5478 case block_type_proc:
5479 return VM_BH_FROM_PROC(block->as.proc);
5480 }
5481 VM_UNREACHABLE(vm_yield_with_proc);
5482 return Qundef;
5483}
5484
5485static VALUE
5486vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5487 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5488 bool is_lambda, VALUE block_handler)
5489{
5490 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5491 VALUE proc = VM_BH_TO_PROC(block_handler);
5492 is_lambda = block_proc_is_lambda(proc);
5493 block_handler = vm_proc_to_block_handler(proc);
5494 }
5495
5496 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5497}
5498
5499static inline VALUE
5500vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5501 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5502 bool is_lambda, VALUE block_handler)
5503{
5504 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5505 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5506 bool is_lambda, VALUE block_handler);
5507
5508 switch (vm_block_handler_type(block_handler)) {
5509 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5510 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5511 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5512 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5513 default: rb_bug("vm_invoke_block: unreachable");
5514 }
5515
5516 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5517}
5518
/* Create a Proc for `blockiseq` captured at the nearest Ruby-level frame
 * of the current execution context. */
static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
    const rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
    struct rb_captured_block *captured;

    if (cfp == 0) {
        rb_bug("vm_make_proc_with_iseq: unreachable");
    }

    captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
    captured->code.iseq = blockiseq;

    return rb_vm_make_proc(ec, captured, rb_cProc);
}
5535
/* Run a `once` instruction body: wrap its iseq in a Proc and call it with
 * no arguments and no block. */
static VALUE
vm_once_exec(VALUE iseq)
{
    VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
    return rb_proc_call_with_block(proc, 0, 0, Qnil);
}
5542
5543static VALUE
5544vm_once_clear(VALUE data)
5545{
5546 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5547 is->once.running_thread = NULL;
5548 return Qnil;
5549}
5550
5551/* defined insn */
5552
5553static bool
5554check_respond_to_missing(VALUE obj, VALUE v)
5555{
5556 VALUE args[2];
5557 VALUE r;
5558
5559 args[0] = obj; args[1] = Qfalse;
5560 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5561 if (!UNDEF_P(r) && RTEST(r)) {
5562 return true;
5563 }
5564 else {
5565 return false;
5566 }
5567}
5568
/* Implement the `defined?` keyword for one operand kind.
 *
 * `op_type` selects what is being tested (ivar, gvar, cvar, constant,
 * function call, method, yield, zsuper, backref); `obj` is usually the
 * symbol/name being tested and `v` an auxiliary value (e.g. the receiver
 * or the constant's scope).  Returns true iff the expression is defined. */
static bool
vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    VALUE klass;
    enum defined_type type = (enum defined_type)op_type;

    switch (type) {
      case DEFINED_IVAR:
        return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
        break;
      case DEFINED_GVAR:
        return rb_gvar_defined(SYM2ID(obj));
        break;
      case DEFINED_CVAR: {
        const rb_cref_t *cref = vm_get_cref(GET_EP());
        klass = vm_get_cvar_base(cref, GET_CFP(), 0);
        return rb_cvar_defined(klass, SYM2ID(obj));
        break;
      }
      case DEFINED_CONST:
      case DEFINED_CONST_FROM: {
        bool allow_nil = type == DEFINED_CONST;
        klass = v;
        return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
        break;
      }
      case DEFINED_FUNC:
        klass = CLASS_OF(v);
        return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
        break;
      case DEFINED_METHOD:{
        VALUE klass = CLASS_OF(v);
        const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);

        if (me) {
            switch (METHOD_ENTRY_VISI(me)) {
              case METHOD_VISI_PRIVATE:
                break;
              case METHOD_VISI_PROTECTED:
                if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
                    break;
                }
                /* fall through: protected and self is kind_of the
                 * defining class — treated as defined */
              case METHOD_VISI_PUBLIC:
                return true;
                break;
              default:
                rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
            }
        }
        else {
            return check_respond_to_missing(obj, v);
        }
        break;
      }
      case DEFINED_YIELD:
        if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
            return true;
        }
        break;
      case DEFINED_ZSUPER:
        {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());

            if (me) {
                VALUE klass = vm_search_normal_superclass(me->defined_class);
                if (!klass) return false;

                ID id = me->def->original_id;

                return rb_method_boundp(klass, id, 0);
            }
        }
        break;
      case DEFINED_REF:
        return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
      default:
        rb_bug("unimplemented defined? type (VM)");
        break;
    }

    return false;
}
5651
/* Non-static wrapper for vm_defined so JITs can call it. */
bool
rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    return vm_defined(ec, reg_cfp, op_type, obj, v);
}
5657
5658static const VALUE *
5659vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5660{
5661 rb_num_t i;
5662 const VALUE *ep = reg_ep;
5663 for (i = 0; i < lv; i++) {
5664 ep = GET_PREV_EP(ep);
5665 }
5666 return ep;
5667}
5668
5669static VALUE
5670vm_get_special_object(const VALUE *const reg_ep,
5671 enum vm_special_object_type type)
5672{
5673 switch (type) {
5674 case VM_SPECIAL_OBJECT_VMCORE:
5675 return rb_mRubyVMFrozenCore;
5676 case VM_SPECIAL_OBJECT_CBASE:
5677 return vm_get_cbase(reg_ep);
5678 case VM_SPECIAL_OBJECT_CONST_BASE:
5679 return vm_get_const_base(reg_ep);
5680 default:
5681 rb_bug("putspecialobject insn: unknown value_type %d", type);
5682 }
5683}
5684
// ZJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
{
    return vm_get_special_object(reg_ep, type);
}
5692
5693static VALUE
5694vm_concat_array(VALUE ary1, VALUE ary2st)
5695{
5696 const VALUE ary2 = ary2st;
5697 VALUE tmp1 = rb_check_to_array(ary1);
5698 VALUE tmp2 = rb_check_to_array(ary2);
5699
5700 if (NIL_P(tmp1)) {
5701 tmp1 = rb_ary_new3(1, ary1);
5702 }
5703 if (tmp1 == ary1) {
5704 tmp1 = rb_ary_dup(ary1);
5705 }
5706
5707 if (NIL_P(tmp2)) {
5708 return rb_ary_push(tmp1, ary2);
5709 }
5710 else {
5711 return rb_ary_concat(tmp1, tmp2);
5712 }
5713}
5714
5715static VALUE
5716vm_concat_to_array(VALUE ary1, VALUE ary2st)
5717{
5718 /* ary1 must be a newly created array */
5719 const VALUE ary2 = ary2st;
5720
5721 if (NIL_P(ary2)) return ary1;
5722
5723 VALUE tmp2 = rb_check_to_array(ary2);
5724
5725 if (NIL_P(tmp2)) {
5726 return rb_ary_push(ary1, ary2);
5727 }
5728 else {
5729 return rb_ary_concat(ary1, tmp2);
5730 }
5731}
5732
// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_concat_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_array(ary1, ary2st);
}
5740
/* Non-static wrapper for vm_concat_to_array (JIT entry point). */
VALUE
rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_to_array(ary1, ary2st);
}
5746
5747static VALUE
5748vm_splat_array(VALUE flag, VALUE ary)
5749{
5750 if (NIL_P(ary)) {
5751 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5752 }
5753 VALUE tmp = rb_check_to_array(ary);
5754 if (NIL_P(tmp)) {
5755 return rb_ary_new3(1, ary);
5756 }
5757 else if (RTEST(flag)) {
5758 return rb_ary_dup(tmp);
5759 }
5760 else {
5761 return tmp;
5762 }
5763}
5764
// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_splat_array(VALUE flag, VALUE ary)
{
    return vm_splat_array(flag, ary);
}
5772
5773static VALUE
5774vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5775{
5776 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5777
5778 if (flag & VM_CHECKMATCH_ARRAY) {
5779 long i;
5780 const long n = RARRAY_LEN(pattern);
5781
5782 for (i = 0; i < n; i++) {
5783 VALUE v = RARRAY_AREF(pattern, i);
5784 VALUE c = check_match(ec, v, target, type);
5785
5786 if (RTEST(c)) {
5787 return c;
5788 }
5789 }
5790 return Qfalse;
5791 }
5792 else {
5793 return check_match(ec, pattern, target, type);
5794 }
5795}
5796
/* Non-static wrapper for vm_check_match (JIT entry point). */
VALUE
rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
{
    return vm_check_match(ec, target, pattern, flag);
}
5802
/* checkkeyword: return Qtrue iff the keyword argument at position `idx`
 * was NOT supplied by the caller (i.e. its default must be evaluated).
 * The supplied-keyword record lives at *(ep - bits): a Fixnum bitmap for
 * up to VM_KW_SPECIFIED_BITS_MAX keywords, otherwise a Hash keyed by
 * keyword index. */
static VALUE
vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
{
    const VALUE kw_bits = *(ep - bits);

    if (FIXNUM_P(kw_bits)) {
        unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
        if ((idx < VM_KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
            return Qfalse;
    }
    else {
        VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
        if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
    }
    return Qtrue;
}
5819
5820static void
5821vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5822{
5823 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5824 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5825 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5826 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5827
5828 switch (flag) {
5829 case RUBY_EVENT_CALL:
5830 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5831 return;
5832 case RUBY_EVENT_C_CALL:
5833 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5834 return;
5835 case RUBY_EVENT_RETURN:
5836 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5837 return;
5839 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5840 return;
5841 }
5842 }
5843}
5844
5845static VALUE
5846vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5847{
5848 if (!rb_const_defined_at(cbase, id)) {
5849 return 0;
5850 }
5851 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5852 return rb_public_const_get_at(cbase, id);
5853 }
5854 else {
5855 return rb_const_get_at(cbase, id);
5856 }
5857}
5858
5859static VALUE
5860vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5861{
5862 if (!RB_TYPE_P(klass, T_CLASS)) {
5863 return 0;
5864 }
5865 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5866 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5867
5868 if (tmp != super) {
5869 rb_raise(rb_eTypeError,
5870 "superclass mismatch for class %"PRIsVALUE"",
5871 rb_id2str(id));
5872 }
5873 else {
5874 return klass;
5875 }
5876 }
5877 else {
5878 return klass;
5879 }
5880}
5881
5882static VALUE
5883vm_check_if_module(ID id, VALUE mod)
5884{
5885 if (!RB_TYPE_P(mod, T_MODULE)) {
5886 return 0;
5887 }
5888 else {
5889 return mod;
5890 }
5891}
5892
/* Bind a freshly created class/module `c` to constant `id` under `cbase`,
 * setting its class path first. */
static VALUE
declare_under(ID id, VALUE cbase, VALUE c)
{
    rb_set_class_path_string(c, cbase, rb_id2str(id));
    rb_const_set(cbase, id, c);
    return c;
}
5900
/* Create a brand-new class under cbase (Object is the default superclass
 * when none was given) and fire the `inherited` hook. */
static VALUE
vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    /* new class declaration */
    VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
    VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
    rb_class_inherited(s, c);
    return c;
}
5910
/* Create a brand-new module under cbase. */
static VALUE
vm_declare_module(ID id, VALUE cbase)
{
    /* new module declaration */
    return declare_under(id, cbase, rb_module_new());
}
5917
5918NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5919static void
5920unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5921{
5922 VALUE name = rb_id2str(id);
5923 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5924 name, type);
5925 VALUE location = rb_const_source_location_at(cbase, id);
5926 if (!NIL_P(location)) {
5927 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5928 " previous definition of %"PRIsVALUE" was here",
5929 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5930 }
5932}
5933
/* Implement `class Foo [< Super]`: reopen an existing class after
 * validating its superclass, or declare a new one.  Triggers autoload for
 * the constant first so an autoloaded definition is reopened, not
 * shadowed. */
static VALUE
vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    VALUE klass;

    if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
        rb_raise(rb_eTypeError,
                 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
                 rb_obj_class(super));
    }

    vm_check_if_namespace(cbase);

    /* find klass */
    rb_autoload_load(cbase, id);

    if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_class(id, flags, super, klass))
            unmatched_redefinition("class", cbase, id, klass);
        return klass;
    }
    else {
        return vm_declare_class(id, flags, cbase, super);
    }
}
5959
/* Implement `module Foo`: reopen an existing module or declare a new one
 * (parallel to vm_define_class). */
static VALUE
vm_define_module(ID id, rb_num_t flags, VALUE cbase)
{
    VALUE mod;

    vm_check_if_namespace(cbase);
    if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_module(id, mod))
            unmatched_redefinition("module", cbase, id, mod);
        return mod;
    }
    else {
        return vm_declare_module(id, cbase);
    }
}
5975
5976static VALUE
5977vm_find_or_create_class_by_id(ID id,
5978 rb_num_t flags,
5979 VALUE cbase,
5980 VALUE super)
5981{
5982 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5983
5984 switch (type) {
5985 case VM_DEFINECLASS_TYPE_CLASS:
5986 /* classdef returns class scope value */
5987 return vm_define_class(id, flags, cbase, super);
5988
5989 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5990 /* classdef returns class scope value */
5991 return rb_singleton_class(cbase);
5992
5993 case VM_DEFINECLASS_TYPE_MODULE:
5994 /* classdef returns class scope value */
5995 return vm_define_module(id, flags, cbase);
5996
5997 default:
5998 rb_bug("unknown defineclass type: %d", (int)type);
5999 }
6000}
6001
6002static rb_method_visibility_t
6003vm_scope_visibility_get(const rb_execution_context_t *ec)
6004{
6005 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6006
6007 if (!vm_env_cref_by_cref(cfp->ep)) {
6008 return METHOD_VISI_PUBLIC;
6009 }
6010 else {
6011 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6012 }
6013}
6014
6015static int
6016vm_scope_module_func_check(const rb_execution_context_t *ec)
6017{
6018 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6019
6020 if (!vm_env_cref_by_cref(cfp->ep)) {
6021 return FALSE;
6022 }
6023 else {
6024 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6025 }
6026}
6027
/* definemethod/definesmethod: add the iseq as a method named `id`.
 * Singleton definitions (`def obj.m`) are always public; normal ones use
 * the scope's current visibility and target the cref's definition class.
 * In module_function scope the method is additionally copied to the
 * singleton class as public. */
static void
vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
{
    VALUE klass;
    rb_method_visibility_t visi;
    rb_cref_t *cref = vm_ec_cref(ec);

    if (is_singleton) {
        klass = rb_singleton_class(obj); /* class and frozen checked in this API */
        visi = METHOD_VISI_PUBLIC;
    }
    else {
        klass = CREF_CLASS_FOR_DEFINITION(cref);
        visi = vm_scope_visibility_get(ec);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
    // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
    if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) &&
        !RCLASS_SINGLETON_P(klass) &&
        (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
    }

    if (!is_singleton && vm_scope_module_func_check(ec)) {
        klass = rb_singleton_class(klass);
        rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
    }
}
6061
// Return the untagged block handler:
// * If it's VM_BLOCK_HANDLER_NONE, return nil
// * If it's an ISEQ or an IFUNC, fetch it from its rb_captured_block
// * If it's a PROC or SYMBOL, return it as is
VALUE
rb_vm_untag_block_handler(VALUE block_handler)
{
    if (VM_BLOCK_HANDLER_NONE == block_handler) return Qnil;

    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:
      case block_handler_type_ifunc: {
        /* strip the 2-bit type tag to recover the captured-block pointer */
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        return captured->code.val;
      }
      case block_handler_type_proc:
      case block_handler_type_symbol:
        return block_handler;
      default:
        rb_bug("rb_vm_untag_block_handler: unreachable");
    }
}
6084
/* Untag the block handler of the given control frame. */
VALUE
rb_vm_get_untagged_block_handler(rb_control_frame_t *reg_cfp)
{
    return rb_vm_untag_block_handler(VM_CF_BLOCK_HANDLER(reg_cfp));
}
6090
/* invokeblock body: yield to the current frame's block handler, raising
 * LocalJumpError when no block was given.  rb_vm_localjump_error raises,
 * so the error branch does not return. */
static VALUE
vm_invokeblock_i(struct rb_execution_context_struct *ec,
                 struct rb_control_frame_struct *reg_cfp,
                 struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());

    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    else {
        return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
    }
}
6106
/* How vm_sendish locates its callee. */
enum method_explorer_type {
    mexp_search_method,      /* normal dispatch on the receiver's class */
    mexp_search_invokeblock, /* yield to the current frame's block */
    mexp_search_super,       /* `super` dispatch from the current method */
};
6112
/* Common body of the send-family instructions: build the calling info from
 * the call data and stack, resolve the callee per `method_explorer`, and
 * invoke it.  A Qundef result means a new Ruby frame was pushed and the
 * caller's interpreter loop must continue executing it. */
static inline VALUE
vm_sendish(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *reg_cfp,
    struct rb_call_data *cd,
    VALUE block_handler,
    enum method_explorer_type method_explorer
) {
    VALUE val = Qundef;
    const struct rb_callinfo *ci = cd->ci;
    const struct rb_callcache *cc;
    int argc = vm_ci_argc(ci);
    VALUE recv = TOPN(argc); /* receiver sits below the arguments */
    struct rb_calling_info calling = {
        .block_handler = block_handler,
        .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
        .recv = recv,
        .argc = argc,
        .cd = cd,
    };

    switch (method_explorer) {
      case mexp_search_method:
        calling.cc = cc = vm_search_method_fastpath(reg_cfp, cd, CLASS_OF(recv));
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_super:
        calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_invokeblock:
        val = vm_invokeblock_i(ec, GET_CFP(), &calling);
        break;
    }
    return val;
}
6149
/* JIT entry for the `send` instruction: set up the block argument, perform
 * dispatch, and run the pushed frame (if any) to completion via VM_EXEC. */
VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6159
// Fallback for YJIT/ZJIT, not used by the interpreter
/* Forwarding variant of rb_vm_send (`...` argument forwarding): builds an
 * adjusted call data/info pair on the stack, dispatches through it, and
 * copies a markable call cache back into the original cd with a write
 * barrier so later calls can reuse it. */
VALUE
rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);

    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6180
/* JIT entry for `opt_send_without_block`: like rb_vm_send but with no
 * block handler. */
VALUE
rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}
6190
/* JIT entry for the `invokesuper` instruction: super dispatch with a block
 * argument (note `true` for the is_super flag of block setup). */
VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);

    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);

    VM_EXEC(ec, val);
    return val;
}
6202
// Fallback for YJIT/ZJIT, not used by the interpreter
// `...`-forwarding variant of rb_vm_invokesuper(); mirrors
// rb_vm_sendforward() but with super-call method search.
VALUE
rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    struct rb_forwarding_call_data adjusted_cd;
    struct rb_callinfo adjusted_ci;

    VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);

    VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);

    // Copy back the populated call cache (with write barrier) for reuse.
    if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
        RB_OBJ_WRITE(CFP_ISEQ(GET_CFP()), &cd->cc, adjusted_cd.cd.cc);
    }

    VM_EXEC(ec, val);
    return val;
}
6222
// `invokeblock` instruction body shared with JITs: yield to the current
// frame's block via the mexp_search_invokeblock dispatch path.
VALUE
rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
    VM_EXEC(ec, val);
    return val;
}
6232
6233/* object.c */
6234VALUE rb_nil_to_s(VALUE);
6235VALUE rb_true_to_s(VALUE);
6236VALUE rb_false_to_s(VALUE);
6237/* numeric.c */
6238VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6239VALUE rb_fix_to_s(VALUE);
6240/* variable.c */
6241VALUE rb_mod_to_s(VALUE);
6243
// Fast path for the `objtostring` instruction (string interpolation):
// return a String for `recv` without a full method call when the relevant
// #to_s is known to be the unredefined built-in. Returns Qundef to
// deoptimize into a regular #to_s dispatch.
static VALUE
vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
{
    int type = TYPE(recv);
    if (type == T_STRING) {
        return recv;
    }

    // Resolve the method entry once; the checks below verify it is still
    // the original C implementation for the receiver's class.
    const struct rb_callable_method_entry_struct *cme = vm_search_method(reg_cfp, cd, recv);

    switch (type) {
      case T_SYMBOL:
        if (check_method_basic_definition(cme)) {
            // rb_sym_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            return rb_sym2str(recv);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (check_cfunc(cme, rb_mod_to_s)) {
            // rb_mod_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            VALUE val = rb_mod_name(recv);
            if (NIL_P(val)) {
                // Anonymous module/class: fall back to the full #to_s output.
                val = rb_mod_to_s(recv);
            }
            return val;
        }
        break;
      case T_NIL:
        if (check_cfunc(cme, rb_nil_to_s)) {
            return rb_nil_to_s(recv);
        }
        break;
      case T_TRUE:
        if (check_cfunc(cme, rb_true_to_s)) {
            return rb_true_to_s(recv);
        }
        break;
      case T_FALSE:
        if (check_cfunc(cme, rb_false_to_s)) {
            return rb_false_to_s(recv);
        }
        break;
      case T_FIXNUM:
        if (check_cfunc(cme, rb_int_to_s)) {
            return rb_fix_to_s(recv);
        }
        break;
    }
    return Qundef;
}
6299
6300// ZJIT implementation is using the C function
6301// and needs to call a non-static function
6302VALUE
6303rb_vm_objtostring(struct rb_control_frame_struct *reg_cfp, VALUE recv, CALL_DATA cd)
6304{
6305 return vm_objtostring(reg_cfp, recv, cd);
6306}
6307
6308static VALUE
6309vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6310{
6311 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6312 return ary;
6313 }
6314 else {
6315 return Qundef;
6316 }
6317}
6318
6319static VALUE
6320vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6321{
6322 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6323 return hash;
6324 }
6325 else {
6326 return Qundef;
6327 }
6328}
6329
6330static VALUE
6331vm_opt_str_freeze(VALUE str, int bop, ID id)
6332{
6333 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6334 return str;
6335 }
6336 else {
6337 return Qundef;
6338 }
6339}
6340
/* OPTIMIZED_CMP() expands to code referencing the bare token `id_cmp`, so it
 * must be defined (as idCmp) before any use below; #undef'd after the last
 * user to keep the name from leaking further into this file. */
#define id_cmp idCmp
6343
// Fused `duparray` + `include?` fast path: while Array#include? is
// unredefined, query the shared literal array directly without duplicating
// it. Otherwise reproduce the `duparray` instruction's behavior (DTRACE
// hook plus rb_ary_resurrect) and dispatch #include? normally.
static VALUE
vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_includes(ary, target);
    }
    else {
        VALUE args[1] = {target};

        // duparray
        RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
        VALUE dupary = rb_ary_resurrect(ary);

        return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
    }
}
6360
6361VALUE
6362rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6363{
6364 return vm_opt_duparray_include_p(ec, ary, target);
6365}
6366
// Fused `newarray` + `max` fast path: while Array#max is unredefined, scan
// the operand stack slice directly (no array allocation). Otherwise
// materialize the array and dispatch #max.
static VALUE
vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
        if (array_len == 0) {
            return Qnil;
        }
        else {
            VALUE result = *ptr;
            // Seed with ptr[0]; the loop visits the remaining array_len-1
            // elements by advancing ptr.
            rb_snum_t i = array_len - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result) > 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
    }
}
6390
6391VALUE
6392rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6393{
6394 return vm_opt_newarray_max(ec, array_len, ptr);
6395}
6396
// Fused `newarray` + `min` fast path; mirror image of vm_opt_newarray_max().
static VALUE
vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
        if (array_len == 0) {
            return Qnil;
        }
        else {
            VALUE result = *ptr;
            // Seed with ptr[0]; visit the remaining array_len-1 elements.
            rb_snum_t i = array_len - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result) < 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
    }
}
6420
6421VALUE
6422rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6423{
6424 return vm_opt_newarray_min(ec, array_len, ptr);
6425}
6426
6427static VALUE
6428vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6429{
6430 // If Array#hash is _not_ monkeypatched, use the optimized call
6431 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6432 return rb_ary_hash_values(array_len, ptr);
6433 }
6434 else {
6435 return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6436 }
6437}
6438
6439VALUE
6440rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr)
6441{
6442 return vm_opt_newarray_hash(ec, array_len, ptr);
6443}
6444
6445VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6446VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6447
// Fused `newarray` + `include?` fast path: wrap the operand stack slice in
// a stack-allocated fake RArray (valid only for the duration of this call)
// to avoid heap allocation while Array#include? is unredefined.
static VALUE
vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
        struct RArray fake_ary = {RBASIC_INIT};
        VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
        return rb_ary_includes(ary, target);
    }
    else {
        VALUE args[1] = {target};
        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
    }
}
6461
6462VALUE
6463rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE target)
6464{
6465 return vm_opt_newarray_include_p(ec, array_len, ptr, target);
6466}
6467
// Fused `newarray` + `pack` fast path. `buffer` is Qundef when no
// buffer: keyword was given at the call site.
static VALUE
vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
        // Pack straight from the operand stack via a stack-allocated fake
        // RArray; no heap array is created.
        struct RArray fake_ary = {RBASIC_INIT};
        VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, array_len);
        return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
    }
    else {
        // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
        // Setup an array with room for keyword hash.
        VALUE args[2];
        args[0] = fmt;
        int kw_splat = RB_NO_KEYWORDS;
        int argc = 1;

        if (!UNDEF_P(buffer)) {
            // Reconstruct the `buffer:` keyword hash for the real #pack call.
            args[1] = rb_hash_new_with_size(1);
            rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
            kw_splat = RB_PASS_KEYWORDS;
            argc++;
        }

        return rb_vm_call_with_refinements(ec, rb_ary_new4(array_len, ptr), idPack, argc, args, kw_splat);
    }
}
6494
6495VALUE
6496rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt, VALUE buffer)
6497{
6498 return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, buffer);
6499}
6500
// JIT entry point for #pack without a buffer: keyword; Qundef marks the
// absent buffer argument.
VALUE
rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t array_len, const VALUE *ptr, VALUE fmt)
{
    return vm_opt_newarray_pack_buffer(ec, array_len, ptr, fmt, Qundef);
}
6506
6507#undef id_cmp
6508
// Register inline constant cache `ic` as depending on constant name `id`,
// so that redefining/removing the constant can invalidate the cache.
// Caller must hold the VM lock (see vm_ic_track_const_chain).
static void
vm_track_constant_cache(ID id, void *ic)
{
    rb_vm_t *vm = GET_VM();
    struct rb_id_table *const_cache = &vm->constant_cache;
    VALUE lookup_result;
    set_table *ics;

    // Find (or lazily create) the set of ICs watching this constant name.
    if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
        ics = (set_table *)lookup_result;
    }
    else {
        ics = set_init_numtable();
        rb_id_table_insert(const_cache, id, (VALUE)ics);
    }

    /* The call below to st_insert could allocate which could trigger a GC.
     * If it triggers a GC, it may free an iseq that also holds a cache to this
     * constant. If that iseq is the last iseq with a cache to this constant, then
     * it will free this ST table, which would cause an use-after-free during this
     * st_insert.
     *
     * So to fix this issue, we store the ID that is currently being inserted
     * and, in remove_from_constant_cache, we don't free the ST table for ID
     * equal to this one.
     *
     * See [Bug #20921].
     */
    vm->inserting_constant_cache_id = id;

    set_insert(ics, (st_data_t)ic);

    vm->inserting_constant_cache_id = (ID)0;
}
6543
// Register `ic` for every name segment of a constant path (e.g. A::B::C)
// under the VM lock. idNULL segments (relative-root markers) are skipped.
static void
vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
{
    RB_VM_LOCKING() {
        for (int i = 0; segments[i]; i++) {
            ID id = segments[i];
            if (id == idNULL) continue;
            vm_track_constant_cache(id, ic);
        }
    }
}
6555
// For JIT inlining
// Core hit test for an inline constant cache entry: the cached value must
// be usable from this ractor (shareable, or we are on the main ractor) and
// the cached CREF (if any) must match the current lexical scope.
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
{
    if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
        VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));

        return (ic_cref == NULL || // no need to check CREF
                ic_cref == vm_get_cref(reg_ep));
    }
    return false;
}
6568
// Check whether a concrete cache entry is valid for the current frame.
static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
    VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
}
6575
// YJIT needs this function to never allocate and never raise
bool
rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
    // ic->entry is NULL until the first successful constant lookup.
    return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
}
6582
// Fill inline constant cache `ic` with `val` for the instruction at `pc`,
// recording the CREF needed to validate future hits, and notify YJIT so it
// can patch compiled code.
static void
vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
{
    // If const_missing fired during the lookup the value is not cacheable;
    // drop any stale entry instead.
    if (ruby_vm_const_missing_count > 0) {
        ruby_vm_const_missing_count = 0;
        ic->entry = NULL;
        return;
    }

    struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
    RB_OBJ_WRITE(ice, &ice->value, val);
    ice->ic_cref = vm_get_const_key_cref(reg_ep);

    // Shareable values can be served to non-main ractors (see
    // vm_inlined_ic_hit_p).
    if (rb_ractor_shareable_p(val)) {
        RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
        ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
    }
    RB_OBJ_WRITE(iseq, &ic->entry, ice);
    RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
    unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
    rb_yjit_constant_ic_update(iseq, ic, pos);
}
6605
// `opt_getconstant_path` instruction body shared with JITs: serve the
// constant from the inline cache on a hit, otherwise perform the full
// lookup, register cache invalidation hooks, and refill the cache.
VALUE
rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
{
    VALUE val;
    const ID *segments = ic->segments;
    struct iseq_inline_constant_cache_entry *ice = ic->entry;

    if (ice && vm_ic_hit_p(ice, GET_EP())) {
        val = ice->value;

        VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
    }
    else {
        ruby_vm_constant_cache_misses++;
        val = vm_get_ev_const_chain(ec, segments);
        vm_ic_track_const_chain(GET_CFP(), ic, segments);
        // Undo the PC increment to get the address to this instruction
        // INSN_ATTR(width) == 2
        vm_ic_update(CFP_ISEQ(GET_CFP()), ic, val, GET_EP(), CFP_PC(GET_CFP()) - 2);
    }
    return val;
}
6628
6629static VALUE
6630vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6631{
6632 rb_thread_t *th = rb_ec_thread_ptr(ec);
6633 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6634
6635 again:
6636 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6637 return is->once.value;
6638 }
6639 else if (is->once.running_thread == NULL) {
6640 VALUE val;
6641 is->once.running_thread = th;
6642 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6643 // TODO: confirm that it is shareable
6644
6645 if (RB_FL_ABLE(val)) {
6646 RB_OBJ_SET_SHAREABLE(val);
6647 }
6648
6649 RB_OBJ_WRITE(CFP_ISEQ(ec->cfp), &is->once.value, val);
6650
6651 /* is->once.running_thread is cleared by vm_once_clear() */
6652 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6653 return val;
6654 }
6655 else if (is->once.running_thread == th) {
6656 /* recursive once */
6657 return vm_once_exec((VALUE)iseq);
6658 }
6659 else {
6660 /* waiting for finish */
6661 RUBY_VM_CHECK_INTS(ec);
6663 goto again;
6664 }
6665}
6666
// `opt_case_dispatch` fast path: look up the branch offset for `key` in the
// precompiled CDHASH. Returns `else_offset` for a definite miss, or 0 to
// deoptimize into sequential `===` checks (e.g. when #=== is redefined or
// the key type is not covered by the hash).
static OFFSET
vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
{
    switch (OBJ_BUILTIN_TYPE(key)) {
      case -1: // immediates (Fixnum, nil, true, false, Symbol, Flonum)
      case T_FLOAT:
      case T_SYMBOL:
      case T_BIGNUM:
      case T_STRING:
        if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
                                   SYMBOL_REDEFINED_OP_FLAG |
                                   INTEGER_REDEFINED_OP_FLAG |
                                   FLOAT_REDEFINED_OP_FLAG |
                                   NIL_REDEFINED_OP_FLAG |
                                   TRUE_REDEFINED_OP_FLAG |
                                   FALSE_REDEFINED_OP_FLAG |
                                   STRING_REDEFINED_OP_FLAG)) {
            st_data_t val;
            if (RB_FLOAT_TYPE_P(key)) {
                // Integral floats compare equal to their integer
                // counterparts under ===, so normalize (e.g. 1.0 -> 1)
                // before the hash lookup. Infinities are left as floats.
                double kval = RFLOAT_VALUE(key);
                if (!isinf(kval) && modf(kval, &kval) == 0.0) {
                    key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
                }
            }
            if (rb_hash_stlike_lookup(hash, key, &val)) {
                return FIX2LONG((VALUE)val);
            }
            else {
                return else_offset;
            }
        }
    }
    return 0;
}
6701
6702NORETURN(static void
6703 vm_stack_consistency_error(const rb_execution_context_t *ec,
6704 const rb_control_frame_t *,
6705 const VALUE *));
6706static void
6707vm_stack_consistency_error(const rb_execution_context_t *ec,
6708 const rb_control_frame_t *cfp,
6709 const VALUE *bp)
6710{
6711 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6712 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6713 static const char stack_consistency_error[] =
6714 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6715#if defined RUBY_DEVEL
6716 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6717 rb_str_cat_cstr(mesg, "\n");
6718 rb_str_append(mesg, rb_iseq_disasm(CFP_ISEQ(cfp)));
6720#else
6721 rb_bug(stack_consistency_error, nsp, nbp);
6722#endif
6723}
6724
// `opt_plus` fast path: handle Fixnum/Flonum/heap-Float/String/Array `+`
// while the corresponding built-in is unredefined; Qundef deoptimizes to a
// regular method call.
static VALUE
vm_opt_plus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_plus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    // Remaining special constants have no fast path; also guards the
    // RBASIC_CLASS calls below against immediates.
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             RBASIC_CLASS(obj) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_opt_plus(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             RBASIC_CLASS(obj) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_plus(recv, obj);
    }
    else {
        return Qundef;
    }
}
6758
// `opt_minus` fast path: Fixnum/Flonum/heap-Float `-` while Integer#- and
// Float#- are unredefined; Qundef deoptimizes to a regular method call.
static VALUE
vm_opt_minus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_minus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6782
// `opt_mult` fast path: Fixnum/Flonum/heap-Float `*` while Integer#* and
// Float#* are unredefined; Qundef deoptimizes to a regular method call.
static VALUE
vm_opt_mult(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_mul_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6806
// `opt_div` fast path: Fixnum/Flonum/heap-Float `/` while the built-ins
// are unredefined. Integer division by zero deoptimizes (Qundef) so the
// real method raises ZeroDivisionError.
static VALUE
vm_opt_div(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else {
        return Qundef;
    }
}
6830
// `opt_mod` fast path: Fixnum/Flonum/heap-Float `%` while the built-ins
// are unredefined. Integer modulo by zero deoptimizes (Qundef) so the real
// method raises ZeroDivisionError.
static VALUE
vm_opt_mod(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else {
        return Qundef;
    }
}
6854
// `opt_neq` fast path: when #!= is still the default (rb_obj_not_equal),
// compute it as the negation of the optimized #== (via cd_eq). Qundef from
// opt_equality propagates as a deoptimization.
static VALUE
vm_opt_neq(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
{
    if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not_equal)) {
        VALUE val = opt_equality(reg_cfp, recv, obj, cd_eq);

        if (!UNDEF_P(val)) {
            return RBOOL(!RTEST(val));
        }
    }

    return Qundef;
}
6868
// `opt_lt` fast path: Fixnum/Flonum/heap-Float `<` while the built-ins are
// unredefined. Fixnum compare works on the raw tagged words because
// tagging is order-preserving.
static VALUE
vm_opt_lt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6892
// `opt_le` fast path; same structure as vm_opt_lt() for `<=`.
static VALUE
vm_opt_le(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6916
// `opt_gt` fast path; same structure as vm_opt_lt() for `>`.
static VALUE
vm_opt_gt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6940
// `opt_ge` fast path; same structure as vm_opt_lt() for `>=`.
static VALUE
vm_opt_ge(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj)  == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
6964
6965
// `opt_ltlt` fast path: String#<< (append) and Array#<< (push) while
// unredefined; Qundef deoptimizes to a regular method call.
static VALUE
vm_opt_ltlt(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
        if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
            return rb_str_buf_append(recv, obj);
        }
        else {
            // Non-string operand (e.g. Integer codepoint): full #<< logic.
            return rb_str_concat(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_push(recv, obj);
    }
    else {
        return Qundef;
    }
}
6989
// `opt_and` fast path: Fixnum & Fixnum via the tag-bit trick below.
static VALUE
vm_opt_and(VALUE recv, VALUE obj)
{
    // If recv and obj are both fixnums, then the bottom tag bit
    // will be 1 on both. 1 & 1 == 1, so the result value will also
    // be a fixnum. If either side is *not* a fixnum, then the tag bit
    // will be 0, and we return Qundef.
    VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);

    if (FIXNUM_P(ret) &&
        BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
        return ret;
    }
    else {
        return Qundef;
    }
}
7007
7008static VALUE
7009vm_opt_or(VALUE recv, VALUE obj)
7010{
7011 if (FIXNUM_2_P(recv, obj) &&
7012 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
7013 return recv | obj;
7014 }
7015 else {
7016 return Qundef;
7017 }
7018}
7019
// `opt_aref` fast path: Integer#[], Array#[] and Hash#[] while the
// built-ins are unredefined; Qundef deoptimizes to a regular method call.
static VALUE
vm_opt_aref(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        if (FIXNUM_2_P(recv, obj) &&
            BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
            return rb_fix_aref(recv, obj);
        }
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
        if (FIXNUM_P(obj)) {
            // Plain integer index: skip the generic argument handling.
            return rb_ary_entry_internal(recv, FIX2LONG(obj));
        }
        else {
            return rb_ary_aref1(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
        return rb_hash_aref(recv, obj);
    }
    else {
        return Qundef;
    }
}
7047
// `opt_aset` fast path: Array#[]= (integer index only) and Hash#[]= while
// unredefined. Returns the assigned value, or Qundef to deoptimize.
static VALUE
vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
             FIXNUM_P(obj)) {
        rb_ary_store(recv, FIX2LONG(obj), set);
        return set;
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, obj, set);
        return set;
    }
    else {
        return Qundef;
    }
}
7069
// Fast path shared by `opt_length`, `opt_size` and `opt_empty_p` (via
// `bop`): String/Array/Hash sizes while the built-in is unredefined.
static VALUE
vm_opt_length(VALUE recv, int bop)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
        if (bop == BOP_EMPTY_P) {
            // For empty? only zero-ness matters, so the cheap byte length
            // suffices; #length needs the character count below.
            return LONG2NUM(RSTRING_LEN(recv));
        }
        else {
            return rb_str_length(recv);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
        return LONG2NUM(RARRAY_LEN(recv));
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
        return INT2FIX(RHASH_SIZE(recv));
    }
    else {
        return Qundef;
    }
}
7097
7098static VALUE
7099vm_opt_empty_p(VALUE recv)
7100{
7101 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7102 case Qundef: return Qundef;
7103 case INT2FIX(0): return Qtrue;
7104 default: return Qfalse;
7105 }
7106}
7107
7108VALUE rb_false(VALUE obj);
7109
// `opt_nil_p` fast path: true for nil while NilClass#nil? is unredefined;
// false when the receiver's #nil? is still the default (rb_false).
static VALUE
vm_opt_nil_p(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
{
    if (NIL_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
        return Qtrue;
    }
    else if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_false)) {
        return Qfalse;
    }
    else {
        return Qundef;
    }
}
7124
// Compute Fixnum#succ directly on the tagged word; the two special cases
// below are the tagged representations of -1 and of the largest Fixnum.
static VALUE
fix_succ(VALUE x)
{
    switch (x) {
      case ~0UL:
        /* 0xFFFF_FFFF == INT2FIX(-1)
         * `-1.succ` is of course 0. */
        return INT2FIX(0);
      case RSHIFT(~0UL, 1):
        /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
         * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
        return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
      default:
        /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
         * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
         * == lx*2 + ly*2 + 1
         * == (lx*2+1) + (ly*2+1) - 1
         * == x + y - 1
         *
         * Here, if we put y := INT2FIX(1):
         *
         * == x + INT2FIX(1) - 1
         * == x + 2 .
         */
        return x + 2;
    }
}
7152
// `opt_succ` fast path: Integer#succ (tagged-word arithmetic) and
// String#succ while unredefined; Qundef deoptimizes.
static VALUE
vm_opt_succ(VALUE recv)
{
    if (FIXNUM_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
        return fix_succ(recv);
    }
    else if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_succ(recv);
    }
    else {
        return Qundef;
    }
}
7171
7172static VALUE
7173vm_opt_not(struct rb_control_frame_struct *reg_cfp, CALL_DATA cd, VALUE recv)
7174{
7175 if (vm_method_cfunc_is(reg_cfp, cd, recv, rb_obj_not)) {
7176 return RBOOL(!RTEST(recv));
7177 }
7178 else {
7179 return Qundef;
7180 }
7181}
7182
// `opt_regexpmatch2` fast path for `recv =~ obj`: handles both
// String =~ Regexp and Regexp =~ obj while #=~ is unredefined.
static VALUE
vm_opt_regexpmatch2(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             CLASS_OF(obj) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
        // String#=~ delegates to the regexp; note the swapped arguments.
        return rb_reg_match(obj, recv);
    }
    else if (RBASIC_CLASS(recv) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
        return rb_reg_match(recv, obj);
    }
    else {
        return Qundef;
    }
}
7202
7203rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7204
7205NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7206
// Fire one TracePoint event (`target_event`, a single bit of `pc_events`)
// to the ractor-global hooks and then to the iseq-local hooks, if each list
// has it enabled. `local_hooks` may be NULL.
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
              rb_event_flag_t pc_events, rb_event_flag_t target_event,
              rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
{
    rb_event_flag_t event = pc_events & target_event;
    VALUE self = GET_SELF();

    VM_ASSERT(rb_popcount64((uint64_t)event) == 1);

    if (local_hooks) local_hooks->running++; // make sure they don't get deleted while global hooks run

    if (event & global_hooks->events) {
        /* increment PC because source line is calculated with PC-1 */
        reg_cfp->pc++;
        vm_dtrace(event, ec);
        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
        reg_cfp->pc--;
    }

    if (local_hooks) local_hooks->running--;
    if (local_hooks != NULL) {
        if (event & local_hooks->events) {
            /* increment PC because source line is calculated with PC-1 */
            reg_cfp->pc++;
            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
            reg_cfp->pc--;
        }
    }
}
7237
/* Fire vm_trace_hook() for target_event only when that event is both present
 * at this PC and enabled by some hook list; expects pc_events, enabled_flags,
 * global_hooks and local_hooks in the caller's scope (see vm_trace). */
#define VM_TRACE_HOOK(target_event, val) do { \
    if ((pc_events & (target_event)) & enabled_flags) { \
        vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
    } \
} while (0)
7243
// Fetch $! for a rescue iseq: the raised exception is stored in the
// frame's last local variable slot.
static VALUE
rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
    VM_ASSERT(ISEQ_BODY(CFP_ISEQ(cfp))->type == ISEQ_TYPE_RESCUE);
    return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
7251
7252static void
7253vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7254{
7255 const VALUE *pc = reg_cfp->pc;
7256 rb_ractor_t *r = rb_ec_ractor_ptr(ec);
7257 rb_event_flag_t enabled_flags = r->pub.hooks.events & ISEQ_TRACE_EVENTS;
7258 rb_event_flag_t ractor_events = enabled_flags;
7259
7260 if (enabled_flags == 0 && rb_ractor_targeted_hooks_cnt(r) == 0) {
7261 return;
7262 }
7263 else {
7264 const rb_iseq_t *iseq = CFP_ISEQ(reg_cfp);
7265 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7266 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7267 unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
7268 rb_hook_list_t *local_hooks = NULL;
7269 if (RB_UNLIKELY(local_hooks_cnt > 0)) {
7270 st_data_t val;
7271 if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)iseq, &val)) {
7272 local_hooks = (rb_hook_list_t*)val;
7273 }
7274 }
7275 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7276
7277 rb_hook_list_t *bmethod_local_hooks = NULL;
7278 rb_event_flag_t bmethod_local_events = 0;
7279 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7280 enabled_flags |= iseq_local_events;
7281
7282 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7283
7284 if (bmethod_frame) {
7285 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7286 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7287 unsigned int bmethod_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
7288 if (RB_UNLIKELY(bmethod_hooks_cnt > 0)) {
7289 st_data_t val;
7290 if (st_lookup(rb_ractor_targeted_hooks(r), (st_data_t)me->def, &val)) {
7291 bmethod_local_hooks = (rb_hook_list_t*)val;
7292 }
7293 if (bmethod_local_hooks) {
7294 bmethod_local_events = bmethod_local_hooks->events;
7295 }
7296 }
7297 }
7298
7299 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7300#if 0
7301 /* disable trace */
7302 /* TODO: incomplete */
7303 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7304#else
7305 /* do not disable trace because of performance problem
7306 * (re-enable overhead)
7307 */
7308#endif
7309 return;
7310 }
7311 else if (ec->trace_arg != NULL) {
7312 /* already tracing */
7313 return;
7314 }
7315 else {
7316 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7317 /* Note, not considering iseq local events here since the same
7318 * iseq could be used in multiple bmethods. */
7319 rb_event_flag_t bmethod_events = ractor_events | bmethod_local_events;
7320
7321 if (0) {
7322 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7323 (int)pos,
7324 (int)pc_events,
7325 RSTRING_PTR(rb_iseq_path(iseq)),
7326 (int)rb_iseq_line_no(iseq, pos),
7327 RSTRING_PTR(rb_iseq_label(iseq)));
7328 }
7329 VM_ASSERT(reg_cfp->pc == pc);
7330 VM_ASSERT(pc_events != 0);
7331
7332 /* check traces */
7333 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7334 /* b_call instruction running as a method. Fire call event. */
7335 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
7336 }
7338 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7339 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7340 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7341 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7342 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7343 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7344 /* b_return instruction running as a method. Fire return event. */
7345 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
7346 }
7347 }
7348 }
7349}
7350#undef VM_TRACE_HOOK
7351
7352#if VM_CHECK_MODE > 0
7353NORETURN( NOINLINE( COLDFUNC
7354void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7355
7356void
7357Init_vm_stack_canary(void)
7358{
7359 /* This has to be called _after_ our PRNG is properly set up. */
7360 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7361 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7362
7363 vm_stack_canary_was_born = true;
7364 VM_ASSERT(n == 0);
7365}
7366
7367void
7368rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7369{
7370 /* Because a method has already been called, why not call
7371 * another one. */
7372 const char *insn = rb_insns_name(i);
7373 VALUE inspection = rb_inspect(c);
7374 const char *str = StringValueCStr(inspection);
7375
7376 rb_bug("dead canary found at %s: %s", insn, str);
7377}
7378
7379#else
void Init_vm_stack_canary(void) { /* no canary when VM_CHECK_MODE == 0 */ }
7381#endif
7382
7383
7384/* a part of the following code is generated by this ruby script:
7385
738616.times{|i|
7387 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7388 typedef_args.prepend(", ") if i != 0
7389 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7390 call_args.prepend(", ") if i != 0
7391 puts %Q{
7392static VALUE
7393builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7394{
7395 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7396 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7397}}
7398}
7399
7400puts
7401puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
740216.times{|i|
7403 puts " builtin_invoker#{i},"
7404}
7405puts "};"
7406*/
7407
/* The 16 invokers below are generated by the Ruby script in the comment
 * above: builtin_invokerN casts `funcptr` to an N-fixed-arity function
 * pointer and calls it with argv[0..N-1] spread as positional arguments.
 * Do not edit by hand; regenerate instead. */
static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
7519
7520typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7521
7522static builtin_invoker
7523lookup_builtin_invoker(int argc)
7524{
7525 static const builtin_invoker invokers[] = {
7526 builtin_invoker0,
7527 builtin_invoker1,
7528 builtin_invoker2,
7529 builtin_invoker3,
7530 builtin_invoker4,
7531 builtin_invoker5,
7532 builtin_invoker6,
7533 builtin_invoker7,
7534 builtin_invoker8,
7535 builtin_invoker9,
7536 builtin_invoker10,
7537 builtin_invoker11,
7538 builtin_invoker12,
7539 builtin_invoker13,
7540 builtin_invoker14,
7541 builtin_invoker15,
7542 };
7543
7544 return invokers[argc];
7545}
7546
7547static inline VALUE
7548invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7549{
7550 const bool canary_p = ISEQ_BODY(CFP_ISEQ(reg_cfp))->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7551 SETUP_CANARY(canary_p);
7552 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7553 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7554 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7555 return ret;
7556}
7557
/* Thin wrapper forwarding to invoke_bf(); arguments are already laid out
 * in `argv` by the caller. */
static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}
7563
7564static VALUE
7565vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7566{
7567 if (0) { // debug print
7568 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7569 for (int i=0; i<bf->argc; i++) {
7570 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(CFP_ISEQ(cfp))->local_table[i+start_index]));
7571 }
7572 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7573 (void *)(uintptr_t)bf->func_ptr);
7574 }
7575
7576 if (bf->argc == 0) {
7577 return invoke_bf(ec, cfp, bf, NULL);
7578 }
7579 else {
7580 const VALUE *argv = cfp->ep - ISEQ_BODY(CFP_ISEQ(cfp))->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7581 return invoke_bf(ec, cfp, bf, argv);
7582 }
7583}
7584
7585// for __builtin_inline!()
7586
7587VALUE
7588rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7589{
7590 const rb_control_frame_t *cfp = ec->cfp;
7591 return cfp->ep[index];
7592}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_EVENT_END
Encountered the end of a class clause.
Definition event.h:40
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
Definition event.h:43
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
Definition event.h:56
#define RUBY_EVENT_CLASS
Encountered a new class.
Definition event.h:39
#define RUBY_EVENT_LINE
Encountered a new line.
Definition event.h:38
#define RUBY_EVENT_RETURN
Encountered a return statement.
Definition event.h:42
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition event.h:44
#define RUBY_EVENT_B_CALL
Encountered a yield statement.
Definition event.h:55
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_RESCUE
Encountered a rescue statement.
Definition event.h:61
static bool RB_FL_ABLE(VALUE obj)
Checks if the object is flaggable.
Definition fl_type.h:381
VALUE rb_singleton_class(VALUE obj)
Finds or creates the singleton class of the passed object.
Definition class.c:2821
VALUE rb_module_new(void)
Creates a new, anonymous module.
Definition class.c:1591
VALUE rb_class_inherited(VALUE super, VALUE klass)
Calls Class::inherited.
Definition class.c:1488
VALUE rb_define_class_id(ID id, VALUE super)
This is a very badly designed API that creates an anonymous class.
Definition class.c:1467
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:108
#define RFLOAT_VALUE
Old name of rb_float_value.
Definition double.h:28
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define rb_ary_new4
Old name of rb_ary_new_from_values.
Definition array.h:659
#define FIXABLE
Old name of RB_FIXABLE.
Definition fixnum.h:25
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define STATIC_SYM_P
Old name of RB_STATIC_SYM_P.
#define ASSUME
Old name of RBIMPL_ASSUME.
Definition assume.h:27
#define FIX2ULONG
Old name of RB_FIX2ULONG.
Definition long.h:47
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define rb_exc_new3
Old name of rb_exc_new_str.
Definition error.h:38
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define DBL2NUM
Old name of rb_float_new.
Definition double.h:29
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
Definition fl_type.h:67
#define FL_SET_RAW
Old name of RB_FL_SET_RAW.
Definition fl_type.h:126
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void rb_notimplement(void)
Definition error.c:3898
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:661
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:476
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
VALUE rb_eFatal
fatal exception.
Definition error.c:1423
VALUE rb_eNoMethodError
NoMethodError exception.
Definition error.c:1435
void rb_exc_fatal(VALUE mesg)
Raises a fatal error in the current thread.
Definition eval.c:674
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1425
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
void rb_error_frozen_object(VALUE frozen_obj)
Identical to rb_error_frozen(), except it takes arbitrary Ruby object instead of C's string.
Definition error.c:4219
VALUE rb_exc_new_str(VALUE etype, VALUE str)
Identical to rb_exc_new_cstr(), except it takes a Ruby's string instead of C's.
Definition error.c:1478
@ RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK
Warning is for checking unused block strictly.
Definition error.h:57
VALUE rb_cClass
Class class.
Definition object.c:63
VALUE rb_cArray
Array class.
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2254
VALUE rb_cRegexp
Regexp class.
Definition re.c:2664
VALUE rb_obj_frozen_p(VALUE obj)
Just calls RB_OBJ_FROZEN() inside.
Definition object.c:1325
VALUE rb_cHash
Hash class.
Definition hash.c:109
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:235
VALUE rb_inspect(VALUE obj)
Generates a human-readable textual representation of the given object.
Definition object.c:657
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:59
VALUE rb_cModule
Module class.
Definition object.c:62
VALUE rb_class_real(VALUE klass)
Finds a "real" class.
Definition object.c:226
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:894
VALUE rb_cFloat
Float class.
Definition numeric.c:198
VALUE rb_cProc
Proc class.
Definition proc.c:45
VALUE rb_cString
String class.
Definition string.c:84
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition gc.h:615
#define RB_OBJ_WRITE(old, slot, young)
Declaration of a "back" pointer.
Definition gc.h:603
VALUE rb_ary_concat(VALUE lhs, VALUE rhs)
Destructively appends the contents of latter into the end of former.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_resurrect(VALUE ary)
I guess there is no use case of this function in extension libraries, but this is a routine identical...
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_includes(VALUE ary, VALUE elem)
Queries if the passed array has the passed entry.
VALUE rb_ary_plus(VALUE lhs, VALUE rhs)
Creates a new array, concatenating the former to the latter.
VALUE rb_ary_cat(VALUE ary, const VALUE *train, long len)
Destructively appends multiple elements at the end of the array.
VALUE rb_check_array_type(VALUE obj)
Try converting an object to its array representation using its to_ary method, if any.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
void rb_ary_store(VALUE ary, long key, VALUE val)
Destructively stores the passed value to the passed array's passed index.
#define UNLIMITED_ARGUMENTS
This macro is used in conjunction with rb_check_arity().
Definition error.h:35
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object,...
Definition proc.c:1169
VALUE rb_reg_last_match(VALUE md)
This just returns the argument, stringified.
Definition re.c:1952
VALUE rb_reg_match(VALUE re, VALUE str)
This is the match operator.
Definition re.c:3796
VALUE rb_reg_nth_match(int n, VALUE md)
Queries the nth captured substring.
Definition re.c:1927
VALUE rb_reg_match_post(VALUE md)
The portion of the original string after the given match.
Definition re.c:2009
VALUE rb_reg_nth_defined(int n, VALUE md)
Identical to rb_reg_nth_match(), except it just returns Boolean.
Definition re.c:1910
VALUE rb_reg_match_pre(VALUE md)
The portion of the original string before the given match.
Definition re.c:1976
VALUE rb_reg_match_last(VALUE md)
The portion of the original string that captured at the very last.
Definition re.c:2042
VALUE rb_str_append(VALUE dst, VALUE src)
Identical to rb_str_buf_append(), except it converts the right hand side before concatenating.
Definition string.c:3834
VALUE rb_str_succ(VALUE orig)
Searches for the "successor" of a string.
Definition string.c:5379
VALUE rb_str_buf_append(VALUE dst, VALUE src)
Identical to rb_str_cat_cstr(), except it takes Ruby's string instead of C's.
Definition string.c:3800
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:4071
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1657
VALUE rb_str_length(VALUE)
Identical to rb_str_strlen(), except it returns the value in rb_cInteger.
Definition string.c:2435
VALUE rb_str_intern(VALUE str)
Identical to rb_to_symbol(), except it assumes the receiver being an instance of RString.
Definition symbol.c:968
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1514
VALUE rb_const_get(VALUE space, ID name)
Identical to rb_const_defined(), except it returns the actual defined value.
Definition variable.c:3476
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:2047
void rb_cvar_set(VALUE klass, ID name, VALUE val)
Assigns a value to a class variable.
Definition variable.c:4263
VALUE rb_cvar_find(VALUE klass, ID name, VALUE *front)
Identical to rb_cvar_get(), except it takes additional "front" pointer.
Definition variable.c:4324
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1515
void rb_const_set(VALUE space, ID name, VALUE val)
Names a constant.
Definition variable.c:3954
VALUE rb_autoload_load(VALUE space, ID name)
Kicks the autoload procedure as if it was "touched".
Definition variable.c:3306
VALUE rb_mod_name(VALUE mod)
Queries the name of a module.
Definition variable.c:136
VALUE rb_const_get_at(VALUE space, ID name)
Identical to rb_const_defined_at(), except it returns the actual defined value.
Definition variable.c:3482
void rb_set_class_path_string(VALUE klass, VALUE space, VALUE name)
Identical to rb_set_class_path(), except it accepts the name as Ruby's string instead of C's.
Definition variable.c:423
VALUE rb_ivar_defined(VALUE obj, ID name)
Queries if the instance variable is defined at the object.
Definition variable.c:2126
int rb_const_defined_at(VALUE space, ID name)
Identical to rb_const_defined(), except it doesn't look for parent classes.
Definition variable.c:3814
VALUE rb_cvar_defined(VALUE klass, ID name)
Queries if the given class has the given class variable.
Definition variable.c:4347
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:380
int rb_const_defined(VALUE space, ID name)
Queries if the constant is defined at the namespace.
Definition variable.c:3808
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:689
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1740
int rb_method_boundp(VALUE klass, ID id, int ex)
Queries if the klass has this method.
Definition vm_method.c:2310
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1164
VALUE rb_sym2str(VALUE symbol)
Obtain a frozen string representation of a symbol (not including the leading colon).
Definition symbol.c:1024
int off
Offset inside of ptr.
Definition io.h:5
int len
Length of the buffer.
Definition io.h:8
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously classified as shareable or not.
Definition ractor.h:235
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:372
#define ALLOCA_N(type, n)
Definition memory.h:292
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define MEMMOVE(p1, p2, type, n)
Handy macro to call memmove.
Definition memory.h:384
VALUE type(ANYARGS)
ANYARGS-ed function type.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static VALUE * RARRAY_PTR(VALUE ary)
Wild use of a C pointer.
Definition rarray.h:366
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:69
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
static VALUE * ROBJECT_FIELDS(VALUE obj)
Queries the instance variables.
Definition robject.h:128
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:89
#define RB_PASS_KEYWORDS
Pass keywords, final argument must be a hash of keywords.
Definition scan_args.h:72
#define RB_NO_KEYWORDS
Do not pass keywords.
Definition scan_args.h:69
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
#define ANYARGS
Functions declared using this macro take arbitrary arguments, including void.
Definition stdarg.h:64
Ruby's array.
Definition rarray.h:128
const VALUE ary[1]
Embedded elements.
Definition rarray.h:188
const VALUE * ptr
Pointer to the C array that holds the elements of the array.
Definition rarray.h:175
Definition hash.h:53
Definition iseq.h:289
Definition vm_core.h:261
const ID * segments
A null-terminated list of ids, used to represent a constant's path idNULL is used to represent the ::...
Definition vm_core.h:285
Definition vm_core.h:293
Definition vm_core.h:288
Definition method.h:63
Definition constant.h:33
CREF (Class REFerence)
Definition method.h:45
Definition class.h:37
Definition method.h:55
rb_cref_t * cref
class reference, should be marked
Definition method.h:144
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:85
SVAR (Special VARiable)
Definition imemo.h:50
const VALUE cref_or_me
class reference or rb_method_entry_t
Definition imemo.h:52
THROW_DATA.
Definition imemo.h:59
Definition vm_core.h:297
intptr_t SIGNED_VALUE
A signed integer type that has the same width with VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_FLOAT_TYPE_P(VALUE obj)
Queries if the object is an instance of rb_cFloat.
Definition value_type.h:264
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376