Ruby 3.5.0dev (2025-11-03 revision 4a3d8346a6d0e068508631541f6bc43e8b154ea1)
vm_insnhelper.c (4a3d8346a6d0e068508631541f6bc43e8b154ea1)
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#ifdef HAVE_STDATOMIC_H
16 #include <stdatomic.h>
17#endif
18
19#include "constant.h"
20#include "debug_counter.h"
21#include "internal.h"
22#include "internal/class.h"
23#include "internal/compar.h"
24#include "internal/hash.h"
25#include "internal/numeric.h"
26#include "internal/proc.h"
27#include "internal/random.h"
28#include "internal/variable.h"
29#include "internal/set_table.h"
30#include "internal/struct.h"
31#include "variable.h"
32
33/* finish iseq array */
34#include "insns.inc"
35#include "insns_info.inc"
36
37extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
38extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
39extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
40extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
41 int argc, const VALUE *argv, int priv);
42
43static const struct rb_callcache vm_empty_cc;
44static const struct rb_callcache vm_empty_cc_for_super;
45
46/* control stack frame */
47
48static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49
50static VALUE
51ruby_vm_special_exception_copy(VALUE exc)
52{
53 VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
54 rb_obj_copy_ivar(e, exc);
55 return e;
56}
57
58NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
59static void
60ec_stack_overflow(rb_execution_context_t *ec, int setup)
61{
62 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
63 ec->raised_flag = RAISED_STACKOVERFLOW;
64 if (setup) {
65 VALUE at = rb_ec_backtrace_object(ec);
66 mesg = ruby_vm_special_exception_copy(mesg);
67 rb_ivar_set(mesg, idBt, at);
68 rb_ivar_set(mesg, idBt_locations, at);
69 }
70 ec->errinfo = mesg;
71 EC_JUMP_TAG(ec, TAG_RAISE);
72}
73
74NORETURN(static void vm_stackoverflow(void));
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
82void
83rb_ec_stack_overflow(rb_execution_context_t *ec, ruby_stack_overflow_critical_level crit)
84{
85 if (rb_during_gc()) {
86 rb_bug("system stack overflow during GC. Faulty native extension?");
87 }
88 if (crit >= rb_stack_overflow_fatal) {
89 ec->raised_flag = RAISED_STACKOVERFLOW;
90 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91 EC_JUMP_TAG(ec, TAG_RAISE);
92 }
93 ec_stack_overflow(ec, crit < rb_stack_overflow_signal);
94}
95
96static inline void stack_check(rb_execution_context_t *ec);
97
98#if VM_CHECK_MODE > 0
99static int
100callable_class_p(VALUE klass)
101{
102#if VM_CHECK_MODE >= 2
103 if (!klass) return FALSE;
104 switch (RB_BUILTIN_TYPE(klass)) {
105 default:
106 break;
107 case T_ICLASS:
108 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
109 case T_MODULE:
110 return TRUE;
111 }
112 while (klass) {
113 if (klass == rb_cBasicObject) {
114 return TRUE;
115 }
116 klass = RCLASS_SUPER(klass);
117 }
118 return FALSE;
119#else
120 return klass != 0;
121#endif
122}
123
124static int
125callable_method_entry_p(const rb_callable_method_entry_t *cme)
126{
127 if (cme == NULL) {
128 return TRUE;
129 }
130 else {
131 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment), "imemo_type:%s", rb_imemo_name(imemo_type((VALUE)cme)));
132
133 if (callable_class_p(cme->defined_class)) {
134 return TRUE;
135 }
136 else {
137 return FALSE;
138 }
139 }
140}
141
142static void
143vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
144{
145 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
146 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
147
148 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
149 cref_or_me_type = imemo_type(cref_or_me);
150 }
151 if (type & VM_FRAME_FLAG_BMETHOD) {
152 req_me = TRUE;
153 }
154
155 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
156 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
157 }
158 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
159 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
160 }
161
162 if (req_me) {
163 if (cref_or_me_type != imemo_ment) {
164 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
165 }
166 }
167 else {
168 if (req_cref && cref_or_me_type != imemo_cref) {
169 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
170 }
171 else { /* cref or Qfalse */
172 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
173 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC || magic == VM_FRAME_MAGIC_DUMMY) && (cref_or_me_type == imemo_ment)) {
174 /* ignore */
175 }
176 else {
177 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
178 }
179 }
180 }
181 }
182
183 if (cref_or_me_type == imemo_ment) {
184 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
185
186 if (!callable_method_entry_p(me)) {
187 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
188 }
189 }
190
191 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
192 VM_ASSERT(iseq == NULL ||
193 RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
194 RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
195 );
196 }
197 else {
198 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
199 }
200}
201
202static void
203vm_check_frame(VALUE type,
204 VALUE specval,
205 VALUE cref_or_me,
206 const rb_iseq_t *iseq)
207{
208 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
209 VM_ASSERT(FIXNUM_P(type));
210
211#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
212 case magic: \
213 vm_check_frame_detail(type, req_block, req_me, req_cref, \
214 specval, cref_or_me, is_cframe, iseq); \
215 break
216 switch (given_magic) {
217 /* BLK ME CREF CFRAME */
218 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
219 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
220 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
221 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
222 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
223 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
224 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
225 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
226 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
227 default:
228 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
229 }
230#undef CHECK
231}
232
233static VALUE vm_stack_canary; /* Initialized later */
234static bool vm_stack_canary_was_born = false;
235
236// Return the index of the instruction right before the given PC.
237// This is needed because insn_entry advances PC before the insn body.
238static unsigned int
239previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
240{
241 unsigned int pos = 0;
242 while (pos < ISEQ_BODY(iseq)->iseq_size) {
243 int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
244 unsigned int next_pos = pos + insn_len(opcode);
245 if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
246 return pos;
247 }
248 pos = next_pos;
249 }
250 rb_bug("failed to find the previous insn");
251}
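/* Editorial sketch (not part of the original file): why the scan above is
 * needed. With a one-operand insn A at index 0 and a zero-operand insn B at
 * index 2, the encoded stream looks like:
 *
 *   iseq_encoded: [insn A][operand][insn B][...]
 *   index:         0       1        2      3
 *
 * While insn B runs, the PC already points at index 3, so the only way to
 * name "the current insn" is to walk from index 0 by insn_len() until the
 * walk lands exactly on the PC and report the previous position. */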
252
253void
254rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
255{
256 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
257 const struct rb_iseq_struct *iseq;
258
259 if (! LIKELY(vm_stack_canary_was_born)) {
260 return; /* :FIXME: isn't it rather fatal to enter this branch? */
261 }
262 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
263 /* This is at the very beginning of a thread. cfp does not exist. */
264 return;
265 }
266 else if (! (iseq = GET_ISEQ())) {
267 return;
268 }
269 else if (LIKELY(sp[0] != vm_stack_canary)) {
270 return;
271 }
272 else {
273 /* we are going to call methods below; squash the canary to
274 * prevent infinite loop. */
275 sp[0] = Qundef;
276 }
277
278 const VALUE *orig = rb_iseq_original_iseq(iseq);
279 const VALUE iseqw = rb_iseqw_new(iseq);
280 const VALUE inspection = rb_inspect(iseqw);
281 const char *stri = rb_str_to_cstr(inspection);
282 const VALUE disasm = rb_iseq_disasm(iseq);
283 const char *strd = rb_str_to_cstr(disasm);
284 const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
285 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
286 const char *name = insn_name(insn);
287
288 /* rb_bug() is not capable of outputting contents this large. It
289 is designed to run from a SIGSEGV handler, which tends to be
290 very restricted. */
291 ruby_debug_printf(
292 "We are killing the stack canary set by %s, "
293 "at %s@pc=%"PRIdPTR"\n"
294 "watch out the C stack trace.\n"
295 "%s",
296 name, stri, pos, strd);
297 rb_bug("see above.");
298}
299#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
300
301#else
302#define vm_check_canary(ec, sp)
303#define vm_check_frame(a, b, c, d)
304#endif /* VM_CHECK_MODE > 0 */
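/* Editorial note (not part of the original file): how the canary machinery
 * above is used. An insn declared "leaf" must not push VM frames or call
 * methods. The leaf-checking machinery stores rb_vm_stack_canary() in the
 * slot just above the insn's sp, and vm_push_frame() calls vm_check_canary();
 * if a supposedly-leaf insn ends up pushing a frame, the canary is found at
 * sp[0] and the report above names the offending insn via its PC. */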
305
306#if USE_DEBUG_COUNTER
307static void
308vm_push_frame_debug_counter_inc(
309 const struct rb_execution_context_struct *ec,
310 const struct rb_control_frame_struct *reg_cfp,
311 VALUE type)
312{
313 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
314
315 RB_DEBUG_COUNTER_INC(frame_push);
316
317 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
318 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
319 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
320 if (prev) {
321 if (curr) {
322 RB_DEBUG_COUNTER_INC(frame_R2R);
323 }
324 else {
325 RB_DEBUG_COUNTER_INC(frame_R2C);
326 }
327 }
328 else {
329 if (curr) {
330 RB_DEBUG_COUNTER_INC(frame_C2R);
331 }
332 else {
333 RB_DEBUG_COUNTER_INC(frame_C2C);
334 }
335 }
336 }
337
338 switch (type & VM_FRAME_MAGIC_MASK) {
339 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
340 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
341 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
342 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
343 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
344 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
345 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
346 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
347 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
348 }
349
350 rb_bug("unreachable");
351}
352#else
353#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
354#endif
355
356// Return a poison value to be set above the stack top to verify leafness.
357VALUE
358rb_vm_stack_canary(void)
359{
360#if VM_CHECK_MODE > 0
361 return vm_stack_canary;
362#else
363 return 0;
364#endif
365}
366
367STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
368STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
369STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
370
371static void
372vm_push_frame(rb_execution_context_t *ec,
373 const rb_iseq_t *iseq,
374 VALUE type,
375 VALUE self,
376 VALUE specval,
377 VALUE cref_or_me,
378 const VALUE *pc,
379 VALUE *sp,
380 int local_size,
381 int stack_max)
382{
383 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
384
385 vm_check_frame(type, specval, cref_or_me, iseq);
386 VM_ASSERT(local_size >= 0);
387
388 /* check stack overflow */
389 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
390 vm_check_canary(ec, sp);
391
392 /* setup vm value stack */
393
394 /* initialize local variables */
395 for (int i=0; i < local_size; i++) {
396 *sp++ = Qnil;
397 }
398
399 /* setup ep with managing data */
400 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
401 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
402 *sp++ = type; /* ep[-0] / ENV_FLAGS */
403
404 /* setup new frame */
405 *cfp = (const struct rb_control_frame_struct) {
406 .pc = pc,
407 .sp = sp,
408 .iseq = iseq,
409 .self = self,
410 .ep = sp - 1,
411 .block_code = NULL,
412#if VM_DEBUG_BP_CHECK
413 .bp_check = sp,
414#endif
415 .jit_return = NULL,
416 };
417
418 /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
419 This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
420 future/untested compilers/platforms. */
421
422 #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
423 atomic_signal_fence(memory_order_seq_cst);
424 #endif
425
426 ec->cfp = cfp;
427
428 if (VMDEBUG == 2) {
429 SDR();
430 }
431 vm_push_frame_debug_counter_inc(ec, cfp, type);
432}
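/* Editorial sketch (not part of the original file): the layout produced by
 * vm_push_frame() for local_size == 2. The env body lives on the value stack
 * and ep points at the FLAGS slot, matching the STATIC_ASSERTs above:
 *
 *   sp[-5]  local0 = Qnil
 *   sp[-4]  local1 = Qnil
 *   sp[-3]  cref_or_me  (ep[-2], VM_ENV_DATA_INDEX_ME_CREF)
 *   sp[-2]  specval     (ep[-1], VM_ENV_DATA_INDEX_SPECVAL)
 *   sp[-1]  type flags  (ep[0],  VM_ENV_DATA_INDEX_FLAGS)  <- cfp->ep
 *   sp[ 0]  (next push)                                    <- cfp->sp
 */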
433
434void
435rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
436{
437 rb_control_frame_t *cfp = ec->cfp;
438
439 if (VMDEBUG == 2) SDR();
440
441 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
442}
443
444/* return TRUE if the frame is finished */
445static inline int
446vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
447{
448 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
449
450 if (VMDEBUG == 2) SDR();
451
452 RUBY_VM_CHECK_INTS(ec);
453 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
454
455 return flags & VM_FRAME_FLAG_FINISH;
456}
457
458void
459rb_vm_pop_frame(rb_execution_context_t *ec)
460{
461 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
462}
463
464// Pushes a pseudo frame whose file name is fname.
465VALUE
466rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
467{
468 rb_iseq_t *rb_iseq_alloc_with_dummy_path(VALUE fname);
469 rb_iseq_t *dmy_iseq = rb_iseq_alloc_with_dummy_path(fname);
470
471 vm_push_frame(ec,
472 dmy_iseq, //const rb_iseq_t *iseq,
473 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
474 ec->cfp->self, // VALUE self,
475 VM_BLOCK_HANDLER_NONE, // VALUE specval,
476 Qfalse, // VALUE cref_or_me,
477 NULL, // const VALUE *pc,
478 ec->cfp->sp, // VALUE *sp,
479 0, // int local_size,
480 0); // int stack_max
481
482 return (VALUE)dmy_iseq;
483}
484
485/* method dispatch */
486static inline VALUE
487rb_arity_error_new(int argc, int min, int max)
488{
489 VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
490 if (min == max) {
491 /* max is not needed */
492 }
493 else if (max == UNLIMITED_ARGUMENTS) {
494 rb_str_cat_cstr(err_mess, "+");
495 }
496 else {
497 rb_str_catf(err_mess, "..%d", max);
498 }
499 rb_str_cat_cstr(err_mess, ")");
500 return rb_exc_new3(rb_eArgError, err_mess);
501}
502
503void
504rb_error_arity(int argc, int min, int max)
505{
506 rb_exc_raise(rb_arity_error_new(argc, min, max));
507}
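/* Editorial examples (derived from rb_arity_error_new() above, not part of
 * the original file):
 *
 *   rb_error_arity(3, 2, 2);
 *       // "wrong number of arguments (given 3, expected 2)"
 *   rb_error_arity(0, 1, UNLIMITED_ARGUMENTS);
 *       // "wrong number of arguments (given 0, expected 1+)"
 *   rb_error_arity(5, 1, 3);
 *       // "wrong number of arguments (given 5, expected 1..3)"
 */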
508
509/* lvar */
510
511NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
512
513static void
514vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
515{
516 /* forcibly remember the env value */
517 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
518 VM_FORCE_WRITE(&ep[index], v);
519 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
520 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
521}
522
523// YJIT assumes this function never runs GC
524static inline void
525vm_env_write(const VALUE *ep, int index, VALUE v)
526{
527 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
528 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
529 VM_STACK_ENV_WRITE(ep, index, v);
530 }
531 else {
532 vm_env_write_slowpath(ep, index, v);
533 }
534}
535
536void
537rb_vm_env_write(const VALUE *ep, int index, VALUE v)
538{
539 vm_env_write(ep, index, v);
540}
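/* Editorial note (not part of the original file): vm_env_write() is the
 * write path behind setlocal. While the env is still on the VM stack,
 * VM_ENV_FLAG_WB_REQUIRED is unset and the write skips the GC write barrier;
 * once the env has escaped to the heap, the slow path above calls
 * rb_gc_writebarrier_remember() on the env object before forcing the write. */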
541
542VALUE
543rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
544{
545 if (block_handler == VM_BLOCK_HANDLER_NONE) {
546 return Qnil;
547 }
548 else {
549 switch (vm_block_handler_type(block_handler)) {
550 case block_handler_type_iseq:
551 case block_handler_type_ifunc:
552 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
553 case block_handler_type_symbol:
554 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
555 case block_handler_type_proc:
556 return VM_BH_TO_PROC(block_handler);
557 default:
558 VM_UNREACHABLE(rb_vm_bh_to_procval);
559 }
560 }
561}
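/* Editorial note (not part of the original file): this conversion backs
 * `def m(&blk)`. A literal block arrives as an iseq/ifunc handler and gets
 * wrapped into a fresh Proc via rb_vm_make_proc(), `&:sym` goes through
 * rb_sym_to_proc(), an explicit `&proc_obj` is returned as-is, and calling
 * with no block yields Qnil. */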
562
563/* svar */
564
565#if VM_CHECK_MODE > 0
566static int
567vm_svar_valid_p(VALUE svar)
568{
569 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
570 switch (imemo_type(svar)) {
571 case imemo_svar:
572 case imemo_cref:
573 case imemo_ment:
574 return TRUE;
575 default:
576 break;
577 }
578 }
579 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
580 return FALSE;
581}
582#endif
583
584static inline struct vm_svar *
585lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
586{
587 VALUE svar;
588
589 if (lep && (ec == NULL || ec->root_lep != lep)) {
590 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
591 }
592 else {
593 svar = ec->root_svar;
594 }
595
596 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
597
598 return (struct vm_svar *)svar;
599}
600
601static inline void
602lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
603{
604 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
605
606 if (lep && (ec == NULL || ec->root_lep != lep)) {
607 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
608 }
609 else {
610 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
611 }
612}
613
614static VALUE
615lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
616{
617 const struct vm_svar *svar = lep_svar(ec, lep);
618
619 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
620
621 switch (key) {
622 case VM_SVAR_LASTLINE:
623 return svar->lastline;
624 case VM_SVAR_BACKREF:
625 return svar->backref;
626 default: {
627 const VALUE ary = svar->others;
628
629 if (NIL_P(ary)) {
630 return Qnil;
631 }
632 else {
633 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
634 }
635 }
636 }
637}
638
639static struct vm_svar *
640svar_new(VALUE obj)
641{
642 struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
643 *((VALUE *)&svar->lastline) = Qnil;
644 *((VALUE *)&svar->backref) = Qnil;
645 *((VALUE *)&svar->others) = Qnil;
646
647 return svar;
648}
649
650static void
651lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
652{
653 struct vm_svar *svar = lep_svar(ec, lep);
654
655 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
656 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
657 }
658
659 switch (key) {
660 case VM_SVAR_LASTLINE:
661 RB_OBJ_WRITE(svar, &svar->lastline, val);
662 return;
663 case VM_SVAR_BACKREF:
664 RB_OBJ_WRITE(svar, &svar->backref, val);
665 return;
666 default: {
667 VALUE ary = svar->others;
668
669 if (NIL_P(ary)) {
670 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
671 }
672 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
673 }
674 }
675}
676
677static inline VALUE
678vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
679{
680 VALUE val;
681
682 if (type == 0) {
683 val = lep_svar_get(ec, lep, key);
684 }
685 else {
686 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
687
688 if (type & 0x01) {
689 switch (type >> 1) {
690 case '&':
691 val = rb_reg_last_match(backref);
692 break;
693 case '`':
694 val = rb_reg_match_pre(backref);
695 break;
696 case '\'':
697 val = rb_reg_match_post(backref);
698 break;
699 case '+':
700 val = rb_reg_match_last(backref);
701 break;
702 default:
703 rb_bug("unexpected back-ref");
704 }
705 }
706 else {
707 val = rb_reg_nth_match((int)(type >> 1), backref);
708 }
709 }
710 return val;
711}
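/* Editorial sketch (not part of the original file): how the `type` operand
 * of getspecial is encoded, as decoded above:
 *
 *   type == 0              -> read an svar slot (key selects $_, $~, ...)
 *   type == (c << 1) | 1   -> character-coded backref: $&, $`, $', $+
 *   type == (n << 1)       -> numbered backref: $1, $2, ...
 *
 * e.g. $& arrives as (('&' << 1) | 1) and $2 as (2 << 1). */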
712
713static inline VALUE
714vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
715{
716 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
717 int nth = 0;
718
719 if (type & 0x01) {
720 switch (type >> 1) {
721 case '&':
722 case '`':
723 case '\'':
724 break;
725 case '+':
726 return rb_reg_last_defined(backref);
727 default:
728 rb_bug("unexpected back-ref");
729 }
730 }
731 else {
732 nth = (int)(type >> 1);
733 }
734 return rb_reg_nth_defined(nth, backref);
735}
736
737PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
738static rb_callable_method_entry_t *
739check_method_entry(VALUE obj, int can_be_svar)
740{
741 if (obj == Qfalse) return NULL;
742
743#if VM_CHECK_MODE > 0
744 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
745#endif
746
747 switch (imemo_type(obj)) {
748 case imemo_ment:
749 return (rb_callable_method_entry_t *)obj;
750 case imemo_cref:
751 return NULL;
752 case imemo_svar:
753 if (can_be_svar) {
754 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
755 }
756 default:
757#if VM_CHECK_MODE > 0
758 rb_bug("check_method_entry: svar should not be there:");
759#endif
760 return NULL;
761 }
762}
763
764static rb_callable_method_entry_t *
765env_method_entry_unchecked(VALUE obj, int can_be_svar)
766{
767 if (obj == Qfalse) return NULL;
768
769 switch (imemo_type(obj)) {
770 case imemo_ment:
771 return (rb_callable_method_entry_t *)obj;
772 case imemo_cref:
773 return NULL;
774 case imemo_svar:
775 if (can_be_svar) {
776 return env_method_entry_unchecked(((struct vm_svar *)obj)->cref_or_me, FALSE);
777 }
778 default:
779 return NULL;
780 }
781}
782
783const rb_callable_method_entry_t *
784rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
785{
786 const VALUE *ep = cfp->ep;
787 rb_callable_method_entry_t *me;
788
789 while (!VM_ENV_LOCAL_P(ep)) {
790 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
791 ep = VM_ENV_PREV_EP(ep);
792 }
793
794 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
795}
796
797const rb_callable_method_entry_t *
798rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp)
799{
800 const VALUE *ep = cfp->ep;
801 rb_callable_method_entry_t *me;
802
803 while (!VM_ENV_LOCAL_P_UNCHECKED(ep)) {
804 if ((me = env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
805 ep = VM_ENV_PREV_EP_UNCHECKED(ep);
806 }
807
808 return env_method_entry_unchecked(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
809}
810
811static const rb_iseq_t *
812method_entry_iseqptr(const rb_callable_method_entry_t *me)
813{
814 switch (me->def->type) {
815 case VM_METHOD_TYPE_ISEQ:
816 return me->def->body.iseq.iseqptr;
817 default:
818 return NULL;
819 }
820}
821
822static rb_cref_t *
823method_entry_cref(const rb_callable_method_entry_t *me)
824{
825 switch (me->def->type) {
826 case VM_METHOD_TYPE_ISEQ:
827 return me->def->body.iseq.cref;
828 default:
829 return NULL;
830 }
831}
832
833#if VM_CHECK_MODE == 0
834PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
835#endif
836static rb_cref_t *
837check_cref(VALUE obj, int can_be_svar)
838{
839 if (obj == Qfalse) return NULL;
840
841#if VM_CHECK_MODE > 0
842 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
843#endif
844
845 switch (imemo_type(obj)) {
846 case imemo_ment:
847 return method_entry_cref((rb_callable_method_entry_t *)obj);
848 case imemo_cref:
849 return (rb_cref_t *)obj;
850 case imemo_svar:
851 if (can_be_svar) {
852 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
853 }
854 default:
855#if VM_CHECK_MODE > 0
856 rb_bug("check_method_entry: svar should not be there:");
857#endif
858 return NULL;
859 }
860}
861
862static inline rb_cref_t *
863vm_env_cref(const VALUE *ep)
864{
865 rb_cref_t *cref;
866
867 while (!VM_ENV_LOCAL_P(ep)) {
868 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
869 ep = VM_ENV_PREV_EP(ep);
870 }
871
872 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
873}
874
875static int
876is_cref(const VALUE v, int can_be_svar)
877{
878 if (RB_TYPE_P(v, T_IMEMO)) {
879 switch (imemo_type(v)) {
880 case imemo_cref:
881 return TRUE;
882 case imemo_svar:
883 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
884 default:
885 break;
886 }
887 }
888 return FALSE;
889}
890
891static int
892vm_env_cref_by_cref(const VALUE *ep)
893{
894 while (!VM_ENV_LOCAL_P(ep)) {
895 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
896 ep = VM_ENV_PREV_EP(ep);
897 }
898 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
899}
900
901static rb_cref_t *
902cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
903{
904 const VALUE v = *vptr;
905 rb_cref_t *cref, *new_cref;
906
907 if (RB_TYPE_P(v, T_IMEMO)) {
908 switch (imemo_type(v)) {
909 case imemo_cref:
910 cref = (rb_cref_t *)v;
911 new_cref = vm_cref_dup(cref);
912 if (parent) {
913 RB_OBJ_WRITE(parent, vptr, new_cref);
914 }
915 else {
916 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
917 }
918 return (rb_cref_t *)new_cref;
919 case imemo_svar:
920 if (can_be_svar) {
921 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
922 }
923 /* fall through */
924 case imemo_ment:
925 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
926 default:
927 break;
928 }
929 }
930 return NULL;
931}
932
933static rb_cref_t *
934vm_cref_replace_with_duplicated_cref(const VALUE *ep)
935{
936 if (vm_env_cref_by_cref(ep)) {
937 rb_cref_t *cref;
938 VALUE envval;
939
940 while (!VM_ENV_LOCAL_P(ep)) {
941 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
942 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
943 return cref;
944 }
945 ep = VM_ENV_PREV_EP(ep);
946 }
947 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
948 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
949 }
950 else {
951 rb_bug("vm_cref_dup: unreachable");
952 }
953}
954
955static rb_cref_t *
956vm_get_cref(const VALUE *ep)
957{
958 rb_cref_t *cref = vm_env_cref(ep);
959
960 if (cref != NULL) {
961 return cref;
962 }
963 else {
964 rb_bug("vm_get_cref: unreachable");
965 }
966}
967
968rb_cref_t *
969rb_vm_get_cref(const VALUE *ep)
970{
971 return vm_get_cref(ep);
972}
973
974static rb_cref_t *
975vm_ec_cref(const rb_execution_context_t *ec)
976{
977 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
978
979 if (cfp == NULL) {
980 return NULL;
981 }
982 return vm_get_cref(cfp->ep);
983}
984
985static const rb_cref_t *
986vm_get_const_key_cref(const VALUE *ep)
987{
988 const rb_cref_t *cref = vm_get_cref(ep);
989 const rb_cref_t *key_cref = cref;
990
991 while (cref) {
992 if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
993 RCLASS_CLONED_P(CREF_CLASS(cref)) ) {
994 return key_cref;
995 }
996 cref = CREF_NEXT(cref);
997 }
998
999 /* does not include singleton class */
1000 return NULL;
1001}
1002
1003rb_cref_t *
1004rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass)
1005{
1006 rb_cref_t *new_cref_head = NULL;
1007 rb_cref_t *new_cref_tail = NULL;
1008
1009 #define ADD_NEW_CREF(new_cref) \
1010 if (new_cref_tail) { \
1011 RB_OBJ_WRITE(new_cref_tail, &new_cref_tail->next, new_cref); \
1012 } \
1013 else { \
1014 new_cref_head = new_cref; \
1015 } \
1016 new_cref_tail = new_cref;
1017
1018 while (cref) {
1019 rb_cref_t *new_cref;
1020 if (CREF_CLASS(cref) == old_klass) {
1021 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
1022 ADD_NEW_CREF(new_cref);
1023 return new_cref_head;
1024 }
1025 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
1026 cref = CREF_NEXT(cref);
1027 ADD_NEW_CREF(new_cref);
1028 }
1029
1030 #undef ADD_NEW_CREF
1031
1032 // Could we just reuse the original cref?
1033 return new_cref_head;
1034}
1035
1036static rb_cref_t *
1037vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1038{
1039 rb_cref_t *prev_cref = NULL;
1040
1041 if (ep) {
1042 prev_cref = vm_env_cref(ep);
1043 }
1044 else {
1045 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1046
1047 if (cfp) {
1048 prev_cref = vm_env_cref(cfp->ep);
1049 }
1050 }
1051
1052 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1053}
1054
1055static inline VALUE
1056vm_get_cbase(const VALUE *ep)
1057{
1058 const rb_cref_t *cref = vm_get_cref(ep);
1059
1060 return CREF_CLASS_FOR_DEFINITION(cref);
1061}
1062
1063static inline VALUE
1064vm_get_const_base(const VALUE *ep)
1065{
1066 const rb_cref_t *cref = vm_get_cref(ep);
1067
1068 while (cref) {
1069 if (!CREF_PUSHED_BY_EVAL(cref)) {
1070 return CREF_CLASS_FOR_DEFINITION(cref);
1071 }
1072 cref = CREF_NEXT(cref);
1073 }
1074
1075 return Qundef;
1076}
1077
1078static inline void
1079vm_check_if_namespace(VALUE klass)
1080{
1081 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1082 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1083 }
1084}
1085
1086static inline void
1087vm_ensure_not_refinement_module(VALUE self)
1088{
1089 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1090 rb_warn("not defined at the refinement, but at the outer class/module");
1091 }
1092}
1093
1094static inline VALUE
1095vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1096{
1097 return klass;
1098}
1099
1100static inline VALUE
1101vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1102{
1103 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1104 VALUE val;
1105
1106 if (NIL_P(orig_klass) && allow_nil) {
1107 /* in current lexical scope */
1108 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1109 const rb_cref_t *cref;
1110 VALUE klass = Qnil;
1111
1112 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1113 root_cref = CREF_NEXT(root_cref);
1114 }
1115 cref = root_cref;
1116 while (cref && CREF_NEXT(cref)) {
1117 if (CREF_PUSHED_BY_EVAL(cref)) {
1118 klass = Qnil;
1119 }
1120 else {
1121 klass = CREF_CLASS(cref);
1122 }
1123 cref = CREF_NEXT(cref);
1124
1125 if (!NIL_P(klass)) {
1126 VALUE av, am = 0;
1127 rb_const_entry_t *ce;
1128 search_continue:
1129 if ((ce = rb_const_lookup(klass, id))) {
1130 rb_const_warn_if_deprecated(ce, klass, id);
1131 val = ce->value;
1132 if (UNDEF_P(val)) {
1133 if (am == klass) break;
1134 am = klass;
1135 if (is_defined) return 1;
1136 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1137 rb_autoload_load(klass, id);
1138 goto search_continue;
1139 }
1140 else {
1141 if (is_defined) {
1142 return 1;
1143 }
1144 else {
1145 if (UNLIKELY(!rb_ractor_main_p())) {
1146 if (!rb_ractor_shareable_p(val)) {
1147 rb_raise(rb_eRactorIsolationError,
1148 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1149 }
1150 }
1151 return val;
1152 }
1153 }
1154 }
1155 }
1156 }
1157
1158 /* search self */
1159 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1160 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1161 }
1162 else {
1163 klass = CLASS_OF(ec->cfp->self);
1164 }
1165
1166 if (is_defined) {
1167 return rb_const_defined(klass, id);
1168 }
1169 else {
1170 return rb_const_get(klass, id);
1171 }
1172 }
1173 else {
1174 vm_check_if_namespace(orig_klass);
1175 if (is_defined) {
1176 return rb_public_const_defined_from(orig_klass, id);
1177 }
1178 else {
1179 return rb_public_const_get_from(orig_klass, id);
1180 }
1181 }
1182}
1183
1184VALUE
1185rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1186{
1187 return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1188}
1189
1190static inline VALUE
1191vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1192{
1193 VALUE val = Qnil;
1194 int idx = 0;
1195 int allow_nil = TRUE;
1196 if (segments[0] == idNULL) {
1197 val = rb_cObject;
1198 idx++;
1199 allow_nil = FALSE;
1200 }
1201 while (segments[idx]) {
1202 ID id = segments[idx++];
1203 val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1204 allow_nil = FALSE;
1205 }
1206 return val;
1207}
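/* Editorial example (not part of the original file): for `Foo::Bar::Baz` the
 * compiler passes a zero-terminated segment list {idFoo, idBar, idBaz, 0}
 * and the loop above resolves each segment against the previous result;
 * for `::Foo::Bar` the list starts with idNULL, which starts the walk at
 * rb_cObject (an absolute constant path). */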
1208
1209
1210static inline VALUE
1211vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1212{
1213 VALUE klass;
1214
1215 if (!cref) {
1216 rb_bug("vm_get_cvar_base: no cref");
1217 }
1218
1219 while (CREF_NEXT(cref) &&
1220 (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1221 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1222 cref = CREF_NEXT(cref);
1223 }
1224 if (top_level_raise && !CREF_NEXT(cref)) {
1225 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1226 }
1227
1228 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1229
1230 if (NIL_P(klass)) {
1231 rb_raise(rb_eTypeError, "no class variables available");
1232 }
1233 return klass;
1234}
1235
1236ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1237static inline void
1238fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1239{
1240 if (is_attr) {
1241 vm_cc_attr_index_set(cc, index, shape_id);
1242 }
1243 else {
1244 vm_ic_attr_index_set(iseq, ic, index, shape_id);
1245 }
1246}
1247
1248#define ractor_incidental_shareable_p(cond, val) \
1249 (!(cond) || rb_ractor_shareable_p(val))
1250#define ractor_object_incidental_shareable_p(obj, val) \
1251 ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1252
1253ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1254static inline VALUE
1255vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1256{
1257 VALUE fields_obj;
1258#if OPT_IC_FOR_IVAR
1259 if (SPECIAL_CONST_P(obj)) {
1260 return default_value;
1261 }
1262
1263 switch (BUILTIN_TYPE(obj)) {
1264 case T_OBJECT:
1265 fields_obj = obj;
1266 break;
1267 case T_CLASS:
1268 case T_MODULE:
1269 {
1270 if (UNLIKELY(!rb_ractor_main_p())) {
1271 // For two reasons we can only use the fast path on the main
1272 // ractor.
1273 // First, only the main ractor is allowed to set ivars on classes
1274 // and modules. So we can skip locking.
1275 // Second, other ractors need to check the shareability of the
1276 // values returned from the class ivars.
1277
1278 if (default_value == Qundef) { // defined?
1279 return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1280 }
1281 else {
1282 goto general_path;
1283 }
1284 }
1285
1286 fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
1287 break;
1288 }
1289 default:
1290 fields_obj = rb_obj_fields(obj, id);
1291 }
1292
1293 if (!fields_obj) {
1294 return default_value;
1295 }
1296
1297 VALUE val = Qundef;
1298
1299 shape_id_t shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
1300 VALUE *ivar_list = rb_imemo_fields_ptr(fields_obj);
1301
1302 shape_id_t cached_id;
1303 attr_index_t index;
1304
1305 if (is_attr) {
1306 vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1307 }
1308 else {
1309 vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1310 }
1311
1312 if (LIKELY(cached_id == shape_id)) {
1313 RUBY_ASSERT(!rb_shape_too_complex_p(cached_id));
1314
1315 if (index == ATTR_INDEX_NOT_SET) {
1316 return default_value;
1317 }
1318
1319 val = ivar_list[index];
1320#if USE_DEBUG_COUNTER
1321 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1322
1323 if (RB_TYPE_P(obj, T_OBJECT)) {
1324 RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1325 }
1326#endif
1327 RUBY_ASSERT(!UNDEF_P(val));
1328 }
1329 else { // cache miss case
1330#if USE_DEBUG_COUNTER
1331 if (is_attr) {
1332 if (cached_id != INVALID_SHAPE_ID) {
1333 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1334 }
1335 else {
1336 RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1337 }
1338 }
1339 else {
1340 if (cached_id != INVALID_SHAPE_ID) {
1341 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1342 }
1343 else {
1344 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1345 }
1346 }
1347 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1348
1349 if (RB_TYPE_P(obj, T_OBJECT)) {
1350 RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1351 }
1352#endif
1353
1354 if (UNLIKELY(rb_shape_too_complex_p(shape_id))) {
1355 st_table *table = (st_table *)ivar_list;
1356
1357 RUBY_ASSERT(table);
1358 RUBY_ASSERT(table == rb_imemo_fields_complex_tbl(fields_obj));
1359
1360 if (!st_lookup(table, id, &val)) {
1361 val = default_value;
1362 }
1363 }
1364 else {
1365 shape_id_t previous_cached_id = cached_id;
1366 if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1367 // This fills in the cache with the shared cache object.
1368 // "ent" is the shared cache object
1369 if (cached_id != previous_cached_id) {
1370 fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1371 }
1372
1373 if (index == ATTR_INDEX_NOT_SET) {
1374 val = default_value;
1375 }
1376 else {
1377 // We fetched the ivar list above
1378 val = ivar_list[index];
1379 RUBY_ASSERT(!UNDEF_P(val));
1380 }
1381 }
1382 else {
1383 if (is_attr) {
1384 vm_cc_attr_index_initialize(cc, shape_id);
1385 }
1386 else {
1387 vm_ic_attr_index_initialize(ic, shape_id);
1388 }
1389
1390 val = default_value;
1391 }
1392 }
1393 }
1394
1395 if (!UNDEF_P(default_value)) {
1396 RUBY_ASSERT(!UNDEF_P(val));
1397 }
1398
1399 return val;
1400
1401general_path:
1402#endif /* OPT_IC_FOR_IVAR */
1403 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1404
1405 if (is_attr) {
1406 return rb_attr_get(obj, id);
1407 }
1408 else {
1409 return rb_ivar_get(obj, id);
1410 }
1411}
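/* Editorial sketch (not part of the original file): the shape-based inline
 * cache above in action. The first execution of `@foo` on some object
 * misses, resolves (shape_id, index) via rb_shape_get_iv_index_with_hint(),
 * and stores the pair in the IC (or CC for attr_reader). Later executions
 * on objects with the same shape_id hit the LIKELY(cached_id == shape_id)
 * branch and read ivar_list[index] with no hash lookup at all. */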
1412
1413static void
1414populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415{
1416 RUBY_ASSERT(!rb_shape_too_complex_p(next_shape_id));
1417
1418 // Cache population code
1419 if (is_attr) {
1420 vm_cc_attr_index_set(cc, index, next_shape_id);
1421 }
1422 else {
1423 vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424 }
1425}
1426
1427ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430
1431static VALUE
1432vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1433{
1434#if OPT_IC_FOR_IVAR
1435 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1436
1437 rb_check_frozen(obj);
1438
1439 attr_index_t index = rb_ivar_set_index(obj, id, val);
1440 shape_id_t next_shape_id = RBASIC_SHAPE_ID(obj);
1441
1442 if (!rb_shape_too_complex_p(next_shape_id)) {
1443 populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1444 }
1445
1446 RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1447 return val;
1448#else
1449 return rb_ivar_set(obj, id, val);
1450#endif
1451}
1452
1453static VALUE
1454vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1455{
1456 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1457}
1458
1459static VALUE
1460vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1461{
1462 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1463}
1464
1465NOINLINE(static VALUE vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1466static VALUE
1467vm_setivar_class(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1468{
1469 if (UNLIKELY(!rb_ractor_main_p())) {
1470 return Qundef;
1471 }
1472
1473 VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
1474 if (UNLIKELY(!fields_obj)) {
1475 return Qundef;
1476 }
1477
1478 shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
1479
1480 // Cache hit case
1481 if (shape_id == dest_shape_id) {
1482 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1483 }
1484 else if (dest_shape_id != INVALID_SHAPE_ID) {
1485 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1486 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1487 }
1488 else {
1489 return Qundef;
1490 }
1491 }
1492 else {
1493 return Qundef;
1494 }
1495
1496 RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
1497
1498 if (shape_id != dest_shape_id) {
1499 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1500 RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
1501 }
1502
1503 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1504
1505 return val;
1506}
1507
1508NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1509static VALUE
1510vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1511{
1512 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1513
1514 // Cache hit case
1515 if (shape_id == dest_shape_id) {
1516 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1517 }
1518 else if (dest_shape_id != INVALID_SHAPE_ID) {
1519 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1520 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1521 }
1522 else {
1523 return Qundef;
1524 }
1525 }
1526 else {
1527 return Qundef;
1528 }
1529
1530 VALUE fields_obj = rb_obj_fields(obj, id);
1531 RUBY_ASSERT(fields_obj);
1532 RB_OBJ_WRITE(fields_obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
1533
1534 if (shape_id != dest_shape_id) {
1535 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1536 RBASIC_SET_SHAPE_ID(fields_obj, dest_shape_id);
1537 }
1538
1539 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1540
1541 return val;
1542}
1543
1544static inline VALUE
1545vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1546{
1547#if OPT_IC_FOR_IVAR
1548 switch (BUILTIN_TYPE(obj)) {
1549 case T_OBJECT:
1550 {
1551 VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1552
1553 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1554 RUBY_ASSERT(dest_shape_id == INVALID_SHAPE_ID || !rb_shape_too_complex_p(dest_shape_id));
1555
1556 if (LIKELY(shape_id == dest_shape_id)) {
1557 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1558 VM_ASSERT(!rb_ractor_shareable_p(obj));
1559 }
1560 else if (dest_shape_id != INVALID_SHAPE_ID) {
1561 if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
1562 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1563
1564 RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1565
1566 RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
1567 }
1568 else {
1569 break;
1570 }
1571 }
1572 else {
1573 break;
1574 }
1575
1576 VALUE *ptr = ROBJECT_FIELDS(obj);
1577
1578 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1579 RB_OBJ_WRITE(obj, &ptr[index], val);
1580
1581 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1582 RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1583 return val;
1584 }
1585 break;
1586 case T_CLASS:
1587 case T_MODULE:
1588 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1589 default:
1590 break;
1591 }
1592
1593 return Qundef;
1594#endif /* OPT_IC_FOR_IVAR */
1595}
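/* Editorial note (not part of the original file): in vm_setivar() a cached
 * dest_shape_id equal to the current shape means the ivar already exists and
 * the store is in place; a cached dest_shape_id that is a direct child
 * reached by adding `id` replays the shape transition without consulting the
 * shape tree. Anything else returns Qundef, and the callers below fall back
 * to vm_setivar_class()/vm_setivar_default() or the slow path. */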
1596
1597static VALUE
1598update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1599{
1600 VALUE defined_class = 0;
1601 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1602
1603 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1604 defined_class = RBASIC(defined_class)->klass;
1605 }
1606
1607 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1608 if (!rb_cvc_tbl) {
1609 rb_bug("the cvc table should be set");
1610 }
1611
1612 VALUE ent_data;
1613 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1614 rb_bug("should have cvar cache entry");
1615 }
1616
1617 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1618
1619 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1620 ent->cref = cref;
1621 ic->entry = ent;
1622
1623 RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1624 RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1625 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1626 RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1627
1628 return cvar_value;
1629}
1630
1631static inline VALUE
1632vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1633{
1634 const rb_cref_t *cref;
1635 cref = vm_get_cref(GET_EP());
1636
1637 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1638 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1639
1640 VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1641 RUBY_ASSERT(!UNDEF_P(v));
1642
1643 return v;
1644 }
1645
1646 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1647
1648 return update_classvariable_cache(iseq, klass, id, cref, ic);
1649}
1650
1651VALUE
1652rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1653{
1654 return vm_getclassvariable(iseq, cfp, id, ic);
1655}
1656
1657static inline void
1658vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1659{
1660 const rb_cref_t *cref;
1661 cref = vm_get_cref(GET_EP());
1662
1663 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1664 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1665
1666 rb_class_ivar_set(ic->entry->class_value, id, val);
1667 return;
1668 }
1669
1670 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1671
1672 rb_cvar_set(klass, id, val);
1673
1674 update_classvariable_cache(iseq, klass, id, cref, ic);
1675}
1676
1677void
1678rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1679{
1680 vm_setclassvariable(iseq, cfp, id, val, ic);
1681}
1682
1683static inline VALUE
1684vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1685{
1686 return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1687}
1688
1689static inline void
1690vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1691{
1692 if (RB_SPECIAL_CONST_P(obj)) {
1693 rb_error_frozen_object(obj);
1694 return;
1695 }
1696
1697 shape_id_t dest_shape_id;
1698 attr_index_t index;
1699 vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1700
1701 if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1702 switch (BUILTIN_TYPE(obj)) {
1703 case T_OBJECT:
1704 break;
1705 case T_CLASS:
1706 case T_MODULE:
1707 if (!UNDEF_P(vm_setivar_class(obj, id, val, dest_shape_id, index))) {
1708 return;
1709 }
1710 break;
1711 default:
1712 if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1713 return;
1714 }
1715 }
1716 vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1717 }
1718}
1719
1720void
1721rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1722{
1723 vm_setinstancevariable(iseq, obj, id, val, ic);
1724}
1725
1726static VALUE
1727vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1728{
1729 /* continue throw */
1730
1731 if (FIXNUM_P(err)) {
1732 ec->tag->state = RUBY_TAG_FATAL;
1733 }
1734 else if (SYMBOL_P(err)) {
1735 ec->tag->state = TAG_THROW;
1736 }
1737 else if (THROW_DATA_P(err)) {
1738 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1739 }
1740 else {
1741 ec->tag->state = TAG_RAISE;
1742 }
1743 return err;
1744}
1745
1746static VALUE
1747vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1748 const int flag, const VALUE throwobj)
1749{
1750 const rb_control_frame_t *escape_cfp = NULL;
1751 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1752
1753 if (flag != 0) {
1754 /* do nothing */
1755 }
1756 else if (state == TAG_BREAK) {
1757 int is_orphan = 1;
1758 const VALUE *ep = GET_EP();
1759 const rb_iseq_t *base_iseq = GET_ISEQ();
1760 escape_cfp = reg_cfp;
1761
1762 while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1763 if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1764 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1765 ep = escape_cfp->ep;
1766 base_iseq = escape_cfp->iseq;
1767 }
1768 else {
1769 ep = VM_ENV_PREV_EP(ep);
1770 base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1771 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1772 VM_ASSERT(escape_cfp->iseq == base_iseq);
1773 }
1774 }
1775
1776 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1777 /* lambda{... break ...} */
1778 is_orphan = 0;
1779 state = TAG_RETURN;
1780 }
1781 else {
1782 ep = VM_ENV_PREV_EP(ep);
1783
1784 while (escape_cfp < eocfp) {
1785 if (escape_cfp->ep == ep) {
1786 const rb_iseq_t *const iseq = escape_cfp->iseq;
1787 const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1788 const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1789 unsigned int i;
1790
1791 if (!ct) break;
1792 for (i=0; i < ct->size; i++) {
1793 const struct iseq_catch_table_entry *const entry =
1794 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1795
1796 if (entry->type == CATCH_TYPE_BREAK &&
1797 entry->iseq == base_iseq &&
1798 entry->start < epc && entry->end >= epc) {
1799 if (entry->cont == epc) { /* found! */
1800 is_orphan = 0;
1801 }
1802 break;
1803 }
1804 }
1805 break;
1806 }
1807
1808 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1809 }
1810 }
1811
1812 if (is_orphan) {
1813 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1814 }
1815 }
1816 else if (state == TAG_RETRY) {
1817 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1818
1819 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1820 }
1821 else if (state == TAG_RETURN) {
1822 const VALUE *current_ep = GET_EP();
1823 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1824 int in_class_frame = 0;
1825 int toplevel = 1;
1826 escape_cfp = reg_cfp;
1827
1828 // find target_lep, target_ep
1829 while (!VM_ENV_LOCAL_P(ep)) {
1830 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1831 target_ep = ep;
1832 }
1833 ep = VM_ENV_PREV_EP(ep);
1834 }
1835 target_lep = ep;
1836
1837 while (escape_cfp < eocfp) {
1838 const VALUE *lep = VM_CF_LEP(escape_cfp);
1839
1840 if (!target_lep) {
1841 target_lep = lep;
1842 }
1843
1844 if (lep == target_lep &&
1845 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1846 ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1847 in_class_frame = 1;
1848 target_lep = 0;
1849 }
1850
1851 if (lep == target_lep) {
1852 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1853 toplevel = 0;
1854 if (in_class_frame) {
1855 /* lambda {class A; ... return ...; end} */
1856 goto valid_return;
1857 }
1858 else {
1859 const VALUE *tep = current_ep;
1860
1861 while (target_lep != tep) {
1862 if (escape_cfp->ep == tep) {
1863 /* in lambda */
1864 if (tep == target_ep) {
1865 goto valid_return;
1866 }
1867 else {
1868 goto unexpected_return;
1869 }
1870 }
1871 tep = VM_ENV_PREV_EP(tep);
1872 }
1873 }
1874 }
1875 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1876 switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1877 case ISEQ_TYPE_TOP:
1878 case ISEQ_TYPE_MAIN:
1879 if (toplevel) {
1880 if (in_class_frame) goto unexpected_return;
1881 if (target_ep == NULL) {
1882 goto valid_return;
1883 }
1884 else {
1885 goto unexpected_return;
1886 }
1887 }
1888 break;
1889 case ISEQ_TYPE_EVAL: {
1890 const rb_iseq_t *is = escape_cfp->iseq;
1891 enum rb_iseq_type t = ISEQ_BODY(is)->type;
1892 while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1893 if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1894 t = ISEQ_BODY(is)->type;
1895 }
1896 toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1897 break;
1898 }
1899 case ISEQ_TYPE_CLASS:
1900 toplevel = 0;
1901 break;
1902 default:
1903 break;
1904 }
1905 }
1906 }
1907
1908 if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1909 if (target_ep == NULL) {
1910 goto valid_return;
1911 }
1912 else {
1913 goto unexpected_return;
1914 }
1915 }
1916
1917 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1918 }
1919 unexpected_return:;
1920 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1921
1922 valid_return:;
1923 /* do nothing */
1924 }
1925 else {
1926 rb_bug("isns(throw): unsupported throw type");
1927 }
1928
1929 ec->tag->state = state;
1930 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1931}
1932
1933static VALUE
1934vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1935 rb_num_t throw_state, VALUE throwobj)
1936{
1937 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1938 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1939
1940 if (state != 0) {
1941 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1942 }
1943 else {
1944 return vm_throw_continue(ec, throwobj);
1945 }
1946}
1947
1948VALUE
1949rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1950{
1951 return vm_throw(ec, reg_cfp, throw_state, throwobj);
1952}
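/* Editorial sketch (not part of the original file): the `throw_state`
 * operand packs the tag into the low bits and flags above them, as decoded
 * in vm_throw():
 *
 *   state = throw_state & VM_THROW_STATE_MASK;    // TAG_BREAK, TAG_RETURN, ...
 *   flag  = throw_state & VM_THROW_NO_ESCAPE_FLAG;
 *
 * state != 0 starts a new non-local jump (break/return/retry), while
 * state == 0 re-propagates an in-flight object via vm_throw_continue(). */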
1953
1954static inline void
1955vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1956{
1957 int is_splat = flag & 0x01;
1958 const VALUE *ptr;
1959 rb_num_t len;
1960 const VALUE obj = ary;
1961
1962 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1963 ary = obj;
1964 ptr = &ary;
1965 len = 1;
1966 }
1967 else {
1968 ptr = RARRAY_CONST_PTR(ary);
1969 len = (rb_num_t)RARRAY_LEN(ary);
1970 }
1971
1972 if (num + is_splat == 0) {
1973 /* no space left on stack */
1974 }
1975 else if (flag & 0x02) {
1976 /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1977 rb_num_t i = 0, j;
1978
1979 if (len < num) {
1980 for (i = 0; i < num - len; i++) {
1981 *cfp->sp++ = Qnil;
1982 }
1983 }
1984
1985 for (j = 0; i < num; i++, j++) {
1986 VALUE v = ptr[len - j - 1];
1987 *cfp->sp++ = v;
1988 }
1989
1990 if (is_splat) {
1991 *cfp->sp++ = rb_ary_new4(len - j, ptr);
1992 }
1993 }
1994 else {
1995 /* normal: ary[num..-1], ary[num-1], ary[num-2], ..., ary[0] # top */
1996 if (is_splat) {
1997 if (num > len) {
1998 *cfp->sp++ = rb_ary_new();
1999 }
2000 else {
2001 *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
2002 }
2003 }
2004
2005 if (num > len) {
2006 rb_num_t i = 0;
2007 for (; i < num - len; i++) {
2008 *cfp->sp++ = Qnil;
2009 }
2010
2011 for (rb_num_t j = 0; i < num; i++, j++) {
2012 *cfp->sp++ = ptr[len - j - 1];
2013 }
2014 }
2015 else {
2016 for (rb_num_t j = 0; j < num; j++) {
2017 *cfp->sp++ = ptr[num - j - 1];
2018 }
2019 }
2020 }
2021
2022 RB_GC_GUARD(ary);
2023}
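/* Editorial example (not part of the original file): for `a, b, *rest = ary`
 * the compiler emits `expandarray 2, 0x01`. With ary == [1, 2, 3, 4] the
 * code above pushes [3, 4] (the splat), then 2, then 1, leaving ary[0] on
 * top so that plain setlocal insns can pop a, b, and rest in order. */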
2024
2025static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2026
2027static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2028
2029static struct rb_class_cc_entries *
2030vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2031{
2032 int initial_capa = 2;
2033 struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
2034#if VM_CHECK_MODE > 0
2035 ccs->debug_sig = ~(VALUE)ccs;
2036#endif
2037 ccs->capa = initial_capa;
2038 ccs->len = 0;
2039 ccs->cme = cme;
2040 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2041
2042 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2043 RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
2044 return ccs;
2045}
2046
2047static void
2048vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2049{
2050 if (! vm_cc_markable(cc)) {
2051 return;
2052 }
2053
2054 if (UNLIKELY(ccs->len == ccs->capa)) {
2055 RUBY_ASSERT(ccs->capa > 0);
2056 ccs->capa *= 2;
2057 ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
2058#if VM_CHECK_MODE > 0
2059 ccs->debug_sig = ~(VALUE)ccs;
2060#endif
2061 // GC?
2062 rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2063 }
2064 VM_ASSERT(ccs->len < ccs->capa);
2065
2066 const int pos = ccs->len++;
2067 ccs->entries[pos].argc = vm_ci_argc(ci);
2068 ccs->entries[pos].flag = vm_ci_flag(ci);
2069 RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);
2070
2071 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2072 // for tuning
2073 // vm_mtbl_dump(klass, 0);
2074 }
2075}
2076
2077#if VM_CHECK_MODE > 0
2078void
2079rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2080{
2081 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2082 for (int i=0; i<ccs->len; i++) {
2083 ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2084 ccs->entries[i].flag,
2085 ccs->entries[i].argc);
2086 rp(ccs->entries[i].cc);
2087 }
2088}
2089
2090static int
2091vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2092{
2093 VM_ASSERT(vm_ccs_p(ccs));
2094 VM_ASSERT(ccs->len <= ccs->capa);
2095
2096 for (int i=0; i<ccs->len; i++) {
2097 const struct rb_callcache *cc = ccs->entries[i].cc;
2098
2099 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2100 VM_ASSERT(vm_cc_class_check(cc, klass));
2101 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2102 VM_ASSERT(!vm_cc_super_p(cc));
2103 VM_ASSERT(!vm_cc_refinement_p(cc));
2104 }
2105 return TRUE;
2106}
2107#endif
2108
2109const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2110
2111static void
2112vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
2113{
2114 ASSERT_vm_locking();
2115
2116 if (rb_multi_ractor_p()) {
2117 if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
2118 // Another ractor updated the CC table while we were waiting on the VM lock.
2119 // We have to retry.
2120 return;
2121 }
2122
2123 VALUE ccs_obj = 0;
2124 rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj);
2125 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;
2126
2127 if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2128 // Another ractor replaced that entry while we were waiting on the VM lock.
2129 return;
2130 }
2131
2132 VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
2133 rb_vm_cc_table_delete(new_table, mid);
2134 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
2135 }
2136 else {
2137 rb_vm_cc_table_delete(cc_tbl, mid);
2138 }
2139}
2140
2141static const struct rb_callcache *
2142vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
2143{
2144 ASSERT_vm_locking();
2145
2146 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2147
2148 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
2149
2150 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2151
2152 if (cme == NULL) {
2153 // undef or not found: can't cache the information
2154 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2155 return &vm_empty_cc;
2156 }
2157
2158 VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
2159 const VALUE original_cc_table = cc_tbl;
2160 if (!cc_tbl) {
2161        // Is this possible after rb_callable_method_entry?
2162 cc_tbl = rb_vm_cc_table_create(1);
2163 }
2164 else if (rb_multi_ractor_p()) {
2165 cc_tbl = rb_vm_cc_table_dup(cc_tbl);
2166 }
2167
2168 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2169
2170 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2171
2172 VM_ASSERT(cc_tbl);
2173
2174 struct rb_class_cc_entries *ccs = NULL;
2175 {
2176 VALUE ccs_obj;
2177 if (UNLIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj))) {
2178 ccs = (struct rb_class_cc_entries *)ccs_obj;
2179 }
2180 else {
2181 // TODO: required?
2182 ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2183 }
2184 }
2185
2186 cme = rb_check_overloaded_cme(cme, ci);
2187
2188 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2189 vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
2190
2191 VM_ASSERT(vm_cc_cme(cc) != NULL);
2192 VM_ASSERT(cme->called_id == mid);
2193 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2194
2195 if (original_cc_table != cc_tbl) {
2196 RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
2197 }
2198
2199 return cc;
2200}
2201
2202static const struct rb_callcache *
2203vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
2204{
2205 VALUE cc_tbl;
2206 struct rb_class_cc_entries *ccs;
2207retry:
2208 cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
2209 ccs = NULL;
2210
2211 if (cc_tbl) {
2212 // CCS data is keyed on method id, so we don't need the method id
2213 // for doing comparisons in the `for` loop below.
2214
2215 VALUE ccs_obj;
2216 if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_obj)) {
2217 ccs = (struct rb_class_cc_entries *)ccs_obj;
2218 const int ccs_len = ccs->len;
2219
2220 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2221 RB_VM_LOCKING() {
2222 vm_evict_cc(klass, cc_tbl, mid);
2223 }
2224 goto retry;
2225 }
2226 else {
2227 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2228
2229 // We already know the method id is correct because we had
2230 // to look up the ccs_data by method id. All we need to
2231            // compare is argc and flag.
2232 unsigned int argc = vm_ci_argc(ci);
2233 unsigned int flag = vm_ci_flag(ci);
2234
2235 for (int i=0; i<ccs_len; i++) {
2236 unsigned int ccs_ci_argc = ccs->entries[i].argc;
2237 unsigned int ccs_ci_flag = ccs->entries[i].flag;
2238 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2239
2240 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2241
2242 if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2243 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2244
2245 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2246 VM_ASSERT(ccs_cc->klass == klass);
2247 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2248
2249 return ccs_cc;
2250 }
2251 }
2252 }
2253 }
2254 }
2255
2256 RB_GC_GUARD(cc_tbl);
2257 return NULL;
2258}
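
/* Editor's note (summary, not part of the upstream source): this is the
 * lock-free read path of the method cache. cc_tbl maps a method id to an
 * rb_class_cc_entries, and the entries under one mid differ only in call
 * shape (argc, flag), so e.g. `obj.foo(1)` and `obj.foo(*args)` occupy
 * separate entries. An invalidated entry is evicted under the VM lock
 * and the whole lookup is retried from the top. */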
2259
2260static const struct rb_callcache *
2261vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2262{
2263 const ID mid = vm_ci_mid(ci);
2264
2265 const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
2266 if (cc) {
2267 return cc;
2268 }
2269
2270 RB_VM_LOCKING() {
2271 if (rb_multi_ractor_p()) {
2272 // The CC may have been populated by another ractor while we were waiting on the lock,
2273            // so we must look up a second time.
2274 cc = vm_lookup_cc(klass, ci, mid);
2275 }
2276
2277 if (!cc) {
2278 cc = vm_populate_cc(klass, ci, mid);
2279 }
2280 }
2281
2282 return cc;
2283}
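
/* Editor's note (summary, not part of the upstream source): a
 * double-checked lookup. The first vm_lookup_cc runs without the VM
 * lock; on a miss, the lookup is repeated under the lock (another ractor
 * may have populated the entry in the meantime) before vm_populate_cc
 * installs a fresh call cache. */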
2284
2285const struct rb_callcache *
2286rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2287{
2288 const struct rb_callcache *cc;
2289
2290 VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2291
2292 cc = vm_search_cc(klass, ci);
2293
2294 VM_ASSERT(cc);
2295 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2296 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2297 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2298 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2299 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2300
2301 return cc;
2302}
2303
2304static const struct rb_callcache *
2305vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2306{
2307#if USE_DEBUG_COUNTER
2308 const struct rb_callcache *old_cc = cd->cc;
2309#endif
2310
2311 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2312
2313#if OPT_INLINE_METHOD_CACHE
2314 cd->cc = cc;
2315
2316 const struct rb_callcache *empty_cc = &vm_empty_cc;
2317 if (cd_owner && cc != empty_cc) {
2318 RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2319 }
2320
2321#if USE_DEBUG_COUNTER
2322 if (!old_cc || old_cc == empty_cc) {
2323 // empty
2324 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2325 }
2326 else if (old_cc == cc) {
2327 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2328 }
2329 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2330 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2331 }
2332 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2333 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2334 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2335 }
2336 else {
2337 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2338 }
2339#endif
2340#endif // OPT_INLINE_METHOD_CACHE
2341
2342 VM_ASSERT(vm_cc_cme(cc) == NULL ||
2343 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2344
2345 return cc;
2346}
2347
2348ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2349static const struct rb_callcache *
2350vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2351{
2352 const struct rb_callcache *cc = cd->cc;
2353
2354#if OPT_INLINE_METHOD_CACHE
2355 if (LIKELY(vm_cc_class_check(cc, klass))) {
2356 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2357 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2358 RB_DEBUG_COUNTER_INC(mc_inline_hit);
2359 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2360 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2361 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2362
2363 return cc;
2364 }
2365 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2366 }
2367 else {
2368 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2369 }
2370#endif
2371
2372 return vm_search_method_slowpath0(cd_owner, cd, klass);
2373}
2374
2375static const struct rb_callable_method_entry_struct *
2376vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2377{
2378 VALUE klass = CLASS_OF(recv);
2379 VM_ASSERT(klass != Qfalse);
2380 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2381
2382 const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
2383 return vm_cc_cme(cc);
2384}
2385
2386const struct rb_callable_method_entry_struct *
2387rb_zjit_vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2388{
2389 return vm_search_method(cd_owner, cd, recv);
2390}
2391
2392#if __has_attribute(transparent_union)
2393typedef union {
2394 VALUE (*anyargs)(ANYARGS);
2395 VALUE (*f00)(VALUE);
2396 VALUE (*f01)(VALUE, VALUE);
2397 VALUE (*f02)(VALUE, VALUE, VALUE);
2398 VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2399 VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2400 VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2401 VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2402    VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2403    VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2404    VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2405    VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2406    VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2407    VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2408    VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2409    VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2410    VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2411    VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2412} __attribute__((__transparent_union__)) cfunc_type;
2413# define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2414#else
2415typedef VALUE (*cfunc_type)(ANYARGS);
2416# define make_cfunc_type(f) (cfunc_type)(f)
2417#endif
2418
2419static inline int
2420check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2421{
2422 if (! me) {
2423 return false;
2424 }
2425 else {
2426 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2427 VM_ASSERT(callable_method_entry_p(me));
2428 VM_ASSERT(me->def);
2429 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2430 return false;
2431 }
2432 else {
2433#if __has_attribute(transparent_union)
2434 return me->def->body.cfunc.func == func.anyargs;
2435#else
2436 return me->def->body.cfunc.func == func;
2437#endif
2438 }
2439 }
2440}
2441
2442static inline int
2443check_method_basic_definition(const rb_callable_method_entry_t *me)
2444{
2445 return me && METHOD_ENTRY_BASIC(me);
2446}
2447
2448static inline int
2449vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2450{
2451 VM_ASSERT(iseq != NULL);
2452 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
2453 return check_cfunc(cme, func);
2454}
2455
2456bool
2457rb_zjit_cme_is_cfunc(const rb_callable_method_entry_t *me, const cfunc_type func)
2458{
2459 return check_cfunc(me, func);
2460}
2461
2462int
2463rb_vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2464{
2465 return vm_method_cfunc_is(iseq, cd, recv, func);
2466}
2467
2468#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2469#define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2470
2471#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2472
2473static inline bool
2474FIXNUM_2_P(VALUE a, VALUE b)
2475{
2476 /* FIXNUM_P(a) && FIXNUM_P(b)
2477 * == ((a & 1) && (b & 1))
2478 * == a & b & 1 */
2479 SIGNED_VALUE x = a;
2480 SIGNED_VALUE y = b;
2481 SIGNED_VALUE z = x & y & 1;
2482 return z == 1;
2483}
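
/* Editor's note (worked example, not part of the upstream source): a
 * Fixnum n is encoded as (n << 1) | 1, so 3 => 0b0111 and 4 => 0b1001,
 * and 0b0111 & 0b1001 & 1 == 1. Heap object VALUEs are word-aligned
 * (low bits 00), so the AND yields 0 whenever either operand is not a
 * Fixnum. */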
2484
2485static inline bool
2486FLONUM_2_P(VALUE a, VALUE b)
2487{
2488#if USE_FLONUM
2489 /* FLONUM_P(a) && FLONUM_P(b)
2490 * == ((a & 3) == 2) && ((b & 3) == 2)
2491 * == ! ((a ^ 2) | (b ^ 2) & 3)
2492 */
2493 SIGNED_VALUE x = a;
2494 SIGNED_VALUE y = b;
2495 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2496 return !z;
2497#else
2498 return false;
2499#endif
2500}
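
/* Editor's note (worked example, not part of the upstream source):
 * flonums carry the tag 0b10 in the low two bits. (x ^ 2) turns that tag
 * into 0b00, so ((x ^ 2) | (y ^ 2)) & 3 is zero exactly when both
 * operands are flonums; any other tag leaves a nonzero bit in the low
 * two positions. */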
2501
2502static VALUE
2503opt_equality_specialized(VALUE recv, VALUE obj)
2504{
2505 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2506 goto compare_by_identity;
2507 }
2508 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2509 goto compare_by_identity;
2510 }
2511 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2512 goto compare_by_identity;
2513 }
2514 else if (SPECIAL_CONST_P(recv)) {
2515 //
2516 }
2517 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2518 double a = RFLOAT_VALUE(recv);
2519 double b = RFLOAT_VALUE(obj);
2520
2521#if MSC_VERSION_BEFORE(1300)
2522 if (isnan(a)) {
2523 return Qfalse;
2524 }
2525 else if (isnan(b)) {
2526 return Qfalse;
2527 }
2528 else
2529#endif
2530 return RBOOL(a == b);
2531 }
2532 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2533 if (recv == obj) {
2534 return Qtrue;
2535 }
2536 else if (RB_TYPE_P(obj, T_STRING)) {
2537 return rb_str_eql_internal(obj, recv);
2538 }
2539 }
2540 return Qundef;
2541
2542 compare_by_identity:
2543 return RBOOL(recv == obj);
2544}
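
/* Editor's note (summary, not part of the upstream source): Qundef here
 * means "not decided", not "false"; the caller falls back to a real
 * method call. Each fast path is guarded by EQ_UNREDEFINED_P, so e.g.
 * 1 == 1 and :a == :a compare by identity only while Integer#== and
 * Symbol#== remain unredefined. */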
2545
2546static VALUE
2547opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2548{
2549 VM_ASSERT(cd_owner != NULL);
2550
2551 VALUE val = opt_equality_specialized(recv, obj);
2552 if (!UNDEF_P(val)) return val;
2553
2554 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2555 return Qundef;
2556 }
2557 else {
2558 return RBOOL(recv == obj);
2559 }
2560}
2561
2562#undef EQ_UNREDEFINED_P
2563
2564static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2565NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2566
2567static VALUE
2568opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2569{
2570 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2571
2572 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2573 return RBOOL(recv == obj);
2574 }
2575 else {
2576 return Qundef;
2577 }
2578}
2579
2580static VALUE
2581opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2582{
2583 VALUE val = opt_equality_specialized(recv, obj);
2584 if (!UNDEF_P(val)) {
2585 return val;
2586 }
2587 else {
2588 return opt_equality_by_mid_slowpath(recv, obj, mid);
2589 }
2590}
2591
2592VALUE
2593rb_equal_opt(VALUE obj1, VALUE obj2)
2594{
2595 return opt_equality_by_mid(obj1, obj2, idEq);
2596}
2597
2598VALUE
2599rb_eql_opt(VALUE obj1, VALUE obj2)
2600{
2601 return opt_equality_by_mid(obj1, obj2, idEqlP);
2602}
2603
2604extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2605extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2606
2607static VALUE
2608check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2609{
2610 switch (type) {
2611 case VM_CHECKMATCH_TYPE_WHEN:
2612 return pattern;
2613 case VM_CHECKMATCH_TYPE_RESCUE:
2614 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2615 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2616 }
2617 /* fall through */
2618 case VM_CHECKMATCH_TYPE_CASE: {
2619 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2620 }
2621 default:
2622 rb_bug("check_match: unreachable");
2623 }
2624}
2625
2626
2627#if MSC_VERSION_BEFORE(1300)
2628#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2629#else
2630#define CHECK_CMP_NAN(a, b) /* do nothing */
2631#endif
2632
2633static inline VALUE
2634double_cmp_lt(double a, double b)
2635{
2636 CHECK_CMP_NAN(a, b);
2637 return RBOOL(a < b);
2638}
2639
2640static inline VALUE
2641double_cmp_le(double a, double b)
2642{
2643 CHECK_CMP_NAN(a, b);
2644 return RBOOL(a <= b);
2645}
2646
2647static inline VALUE
2648double_cmp_gt(double a, double b)
2649{
2650 CHECK_CMP_NAN(a, b);
2651 return RBOOL(a > b);
2652}
2653
2654static inline VALUE
2655double_cmp_ge(double a, double b)
2656{
2657 CHECK_CMP_NAN(a, b);
2658 return RBOOL(a >= b);
2659}
2660
2661// NOTE: vm_dump.c keeps a copy of this function.
2662static inline VALUE *
2663vm_base_ptr(const rb_control_frame_t *cfp)
2664{
2665 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2666
2667 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2668 VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2669
2670 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2671 int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2672 int params = ISEQ_BODY(cfp->iseq)->param.size;
2673
2674 CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2675 bp += vm_ci_argc(ci);
2676 }
2677
2678 if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2679 /* adjust `self' */
2680 bp += 1;
2681 }
2682#if VM_DEBUG_BP_CHECK
2683 if (bp != cfp->bp_check) {
2684 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2685 (long)(cfp->bp_check - GET_EC()->vm_stack),
2686 (long)(bp - GET_EC()->vm_stack));
2687 rb_bug("vm_base_ptr: unreachable");
2688 }
2689#endif
2690 return bp;
2691 }
2692 else {
2693 return NULL;
2694 }
2695}
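
/* Editor's note (summary, not part of the upstream source): for a Ruby
 * frame the base pointer is prev_cfp->sp + local_table_size +
 * VM_ENV_DATA_SIZE (plus one slot for self in method frames, plus the
 * caller-supplied argc for forwardable iseqs), i.e. the first slot of
 * this frame's operand stack. */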
2696
2697VALUE *
2698rb_vm_base_ptr(const rb_control_frame_t *cfp)
2699{
2700 return vm_base_ptr(cfp);
2701}
2702
2703/* method call processes with call_info */
2704
2705#include "vm_args.c"
2706
2707static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2708ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2709static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2710static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2711static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2712static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2713static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2714
2715static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2716
2717static VALUE
2718vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2719{
2720 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2721
2722 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2723}
2724
2725static VALUE
2726vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2727{
2728 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2729
2730 const struct rb_callcache *cc = calling->cc;
2731 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2732 int param = ISEQ_BODY(iseq)->param.size;
2733 int local = ISEQ_BODY(iseq)->local_table_size;
2734 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2735}
2736
2737bool
2738rb_simple_iseq_p(const rb_iseq_t *iseq)
2739{
2740 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2741 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2742 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2743 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2744 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2745 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2746 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2747 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2748}
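
/* Editor's note (illustrative, not part of the upstream source):
 * "simple" means mandatory positional parameters only. `def m(a, b)`
 * qualifies; optionals, rest, post, keyword, **kwrest, `**nil`
 * (accepts_no_kwarg), block, and `...` (forwardable) parameters all
 * force the more general setup paths below. */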
2749
2750bool
2751rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2752{
2753 return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2754 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2755 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2756 ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2757 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2758 ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2759 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2760 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2761}
2762
2763bool
2764rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2765{
2766 return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2767 ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2768 ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2769 ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2770 ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2771 ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2772 ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2773}
2774
2775#define ALLOW_HEAP_ARGV (-2)
2776#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2777
2778static inline bool
2779vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2780{
2781 vm_check_canary(GET_EC(), cfp->sp);
2782 bool ret = false;
2783
2784 if (!NIL_P(ary)) {
2785 const VALUE *ptr = RARRAY_CONST_PTR(ary);
2786 long len = RARRAY_LEN(ary);
2787 int argc = calling->argc;
2788
2789 if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2790 /* Avoid SystemStackError when splatting large arrays by storing arguments in
2791             * a temporary array, instead of trying to keep arguments on the VM stack.
2792 */
2793 VALUE *argv = cfp->sp - argc;
2794 VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2795 rb_ary_cat(argv_ary, argv, argc);
2796 rb_ary_cat(argv_ary, ptr, len);
2797 cfp->sp -= argc - 1;
2798 cfp->sp[-1] = argv_ary;
2799 calling->argc = 1;
2800 calling->heap_argv = argv_ary;
2801 RB_GC_GUARD(ary);
2802 }
2803 else {
2804 long i;
2805
2806 if (max_args >= 0 && len + argc > max_args) {
2807 /* If only a given max_args is allowed, copy up to max args.
2808 * Used by vm_callee_setup_block_arg for non-lambda blocks,
2809 * where additional arguments are ignored.
2810 *
2811 * Also, copy up to one more argument than the maximum,
2812 * in case it is an empty keyword hash that will be removed.
2813 */
2814 calling->argc += len - (max_args - argc + 1);
2815 len = max_args - argc + 1;
2816 ret = true;
2817 }
2818 else {
2819            /* Unset heap_argv if it was set originally. This can happen
2820             * when forwarding modified arguments, where heap_argv was
2821             * used originally but is not supported by the forwarded
2822             * method in all cases.
2823             */
2824 calling->heap_argv = 0;
2825 }
2826 CHECK_VM_STACK_OVERFLOW(cfp, len);
2827
2828 for (i = 0; i < len; i++) {
2829 *cfp->sp++ = ptr[i];
2830 }
2831 calling->argc += i;
2832 }
2833 }
2834
2835 return ret;
2836}
2837
2838static inline void
2839vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2840{
2841 const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2842 const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2843 const VALUE h = rb_hash_new_with_size(kw_len);
2844 VALUE *sp = cfp->sp;
2845 int i;
2846
2847 for (i=0; i<kw_len; i++) {
2848 rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2849 }
2850 (sp-kw_len)[0] = h;
2851
2852 cfp->sp -= kw_len - 1;
2853 calling->argc -= kw_len - 1;
2854 calling->kw_splat = 1;
2855}
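
/* Editor's note (worked example, not part of the upstream source): for
 * `f(k1: 1, k2: 2)` the caller pushed the values 1 and 2; the loop above
 * folds them into a single hash stored in the first keyword's slot, so
 * the stack shrinks by kw_len - 1 slots and argc drops from 2 to 1, with
 * kw_splat set. */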
2856
2857static inline VALUE
2858vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2859{
2860 if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2861 if (keyword_hash != Qnil) {
2862 /* Convert a non-hash keyword splat to a new hash */
2863 keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2864 }
2865 }
2866 else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2867 /* Convert a hash keyword splat to a new hash unless
2868 * a mutable keyword splat was passed.
2869         * Skip allocating a new hash for an empty keyword splat,
2870         * as it will be ignored by both callers.
2871 */
2872 keyword_hash = rb_hash_dup(keyword_hash);
2873 }
2874 return keyword_hash;
2875}
2876
2877static inline void
2878CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2879 struct rb_calling_info *restrict calling,
2880 const struct rb_callinfo *restrict ci, int max_args)
2881{
2882 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2883 if (IS_ARGS_KW_SPLAT(ci)) {
2884 // f(*a, **kw)
2885 VM_ASSERT(calling->kw_splat == 1);
2886
2887 cfp->sp -= 2;
2888 calling->argc -= 2;
2889 VALUE ary = cfp->sp[0];
2890 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2891
2892 // splat a
2893 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2894
2895 // put kw
2896 if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2897 if (UNLIKELY(calling->heap_argv)) {
2898 rb_ary_push(calling->heap_argv, kwh);
2899 ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2900 if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2901 calling->kw_splat = 0;
2902 }
2903 }
2904 else {
2905 cfp->sp[0] = kwh;
2906 cfp->sp++;
2907 calling->argc++;
2908
2909 VM_ASSERT(calling->kw_splat == 1);
2910 }
2911 }
2912 else {
2913 calling->kw_splat = 0;
2914 }
2915 }
2916 else {
2917 // f(*a)
2918 VM_ASSERT(calling->kw_splat == 0);
2919
2920 cfp->sp -= 1;
2921 calling->argc -= 1;
2922 VALUE ary = cfp->sp[0];
2923
2924 if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2925 goto check_keyword;
2926 }
2927
2928 // check the last argument
2929 VALUE last_hash, argv_ary;
2930 if (UNLIKELY(argv_ary = calling->heap_argv)) {
2931 if (!IS_ARGS_KEYWORD(ci) &&
2932 RARRAY_LEN(argv_ary) > 0 &&
2933 RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2934 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2935
2936 rb_ary_pop(argv_ary);
2937 if (!RHASH_EMPTY_P(last_hash)) {
2938 rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2939 calling->kw_splat = 1;
2940 }
2941 }
2942 }
2943 else {
2944check_keyword:
2945 if (!IS_ARGS_KEYWORD(ci) &&
2946 calling->argc > 0 &&
2947 RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2948 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2949
2950 if (RHASH_EMPTY_P(last_hash)) {
2951 calling->argc--;
2952 cfp->sp -= 1;
2953 }
2954 else {
2955 cfp->sp[-1] = rb_hash_dup(last_hash);
2956 calling->kw_splat = 1;
2957 }
2958 }
2959 }
2960 }
2961 }
2962 else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2963 // f(**kw)
2964 VM_ASSERT(calling->kw_splat == 1);
2965 VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2966
2967 if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2968 cfp->sp--;
2969 calling->argc--;
2970 calling->kw_splat = 0;
2971 }
2972 else {
2973 cfp->sp[-1] = kwh;
2974 }
2975 }
2976 else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2977 // f(k1:1, k2:2)
2978 VM_ASSERT(calling->kw_splat == 0);
2979
2980 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2981 * by creating a keyword hash.
2982 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2983 */
2984 vm_caller_setup_arg_kw(cfp, calling, ci);
2985 }
2986}
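
/* Editor's note (summary, not part of the upstream source):
 * CALLER_SETUP_ARG normalizes the caller-side argument shape before
 * parameter binding: `f(*a)` copies the array onto the stack (or into
 * heap_argv for splats longer than VM_ARGC_STACK_MAX), `f(*a, **kw)`
 * additionally appends the keyword hash, and literal keywords are
 * funneled through vm_caller_setup_arg_kw above. */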
2987
2988#define USE_OPT_HIST 0
2989
2990#if USE_OPT_HIST
2991#define OPT_HIST_MAX 64
2992static int opt_hist[OPT_HIST_MAX+1];
2993
2994__attribute__((destructor))
2995static void
2996opt_hist_show_results_at_exit(void)
2997{
2998 for (int i=0; i<OPT_HIST_MAX; i++) {
2999 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
3000 }
3001}
3002#endif
3003
3004static VALUE
3005vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3006 struct rb_calling_info *calling)
3007{
3008 const struct rb_callcache *cc = calling->cc;
3009 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3010 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3011 const int opt = calling->argc - lead_num;
3012 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3013 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3014 const int param = ISEQ_BODY(iseq)->param.size;
3015 const int local = ISEQ_BODY(iseq)->local_table_size;
3016 const int delta = opt_num - opt;
3017
3018 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3019
3020#if USE_OPT_HIST
3021 if (opt_pc < OPT_HIST_MAX) {
3022 opt_hist[opt]++;
3023 }
3024 else {
3025 opt_hist[OPT_HIST_MAX]++;
3026 }
3027#endif
3028
3029 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
3030}
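
/* Editor's note (worked example, not part of the upstream source): for
 * `def m(a, b = 1, c = 2)` we have lead_num = 1, opt_num = 2, and an
 * opt_table of opt_num + 1 entries. A call with two arguments gives
 * opt = 1, so execution starts at opt_table[1], the bytecode offset that
 * evaluates only the default for c. */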
3031
3032static VALUE
3033vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3034 struct rb_calling_info *calling)
3035{
3036 const struct rb_callcache *cc = calling->cc;
3037 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3038 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3039 const int opt = calling->argc - lead_num;
3040 const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3041
3042 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
3043
3044#if USE_OPT_HIST
3045 if (opt_pc < OPT_HIST_MAX) {
3046 opt_hist[opt]++;
3047 }
3048 else {
3049 opt_hist[OPT_HIST_MAX]++;
3050 }
3051#endif
3052
3053 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3054}
3055
3056static void
3057args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq, const rb_callable_method_entry_t *cme,
3058 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
3059 VALUE *const locals);
3060
3061static VALUE
3062vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3063 struct rb_calling_info *calling)
3064{
3065 const struct rb_callcache *cc = calling->cc;
3066 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3067 int param_size = ISEQ_BODY(iseq)->param.size;
3068 int local_size = ISEQ_BODY(iseq)->local_table_size;
3069
3070 // Setting up local size and param size
3071 VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3072
3073 local_size = local_size + vm_ci_argc(calling->cd->ci);
3074 param_size = param_size + vm_ci_argc(calling->cd->ci);
3075
3076 cfp->sp[0] = (VALUE)calling->cd->ci;
3077
3078 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
3079}
3080
3081static VALUE
3082vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3083 struct rb_calling_info *calling)
3084{
3085 const struct rb_callinfo *ci = calling->cd->ci;
3086 const struct rb_callcache *cc = calling->cc;
3087
3088 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
3089 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
3090
3091 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3092 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3093 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3094 const int ci_kw_len = kw_arg->keyword_len;
3095 const VALUE * const ci_keywords = kw_arg->keywords;
3096 VALUE *argv = cfp->sp - calling->argc;
3097 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3098 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3099 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3100 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3101 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3102
3103 int param = ISEQ_BODY(iseq)->param.size;
3104 int local = ISEQ_BODY(iseq)->local_table_size;
3105 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3106}
3107
3108static VALUE
3109vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3110 struct rb_calling_info *calling)
3111{
3112 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
3113 const struct rb_callcache *cc = calling->cc;
3114
3115 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
3116 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
3117
3118 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3119 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3120 VALUE * const argv = cfp->sp - calling->argc;
3121 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3122
3123 int i;
3124 for (i=0; i<kw_param->num; i++) {
3125 klocals[i] = kw_param->default_values[i];
3126 }
3127 klocals[i] = INT2FIX(0); // kw specify flag
3128 // NOTE:
3129    // nobody checks this value, but it should be cleared because it can
3130    // point to invalid VALUEs (T_NONE objects, raw pointers, and so on).
3131
3132 int param = ISEQ_BODY(iseq)->param.size;
3133 int local = ISEQ_BODY(iseq)->local_table_size;
3134 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3135}
3136
3137static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3138
3139static VALUE
3140vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3141 struct rb_calling_info *calling)
3142{
3143 const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3144 cfp->sp -= (calling->argc + 1);
3145 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3146 return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3147}
3148
3149VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3150
3151static void
3152warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3153{
3154 rb_vm_t *vm = GET_VM();
3155 set_table *dup_check_table = vm->unused_block_warning_table;
3156 st_data_t key;
3157 bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3158
3159 union {
3160 VALUE v;
3161 unsigned char b[SIZEOF_VALUE];
3162 } k1 = {
3163 .v = (VALUE)pc,
3164 }, k2 = {
3165 .v = (VALUE)cme->def,
3166 };
3167
3168    // relaxed check
3169 if (!strict_unused_block) {
3170 key = (st_data_t)cme->def->original_id;
3171
3172 if (set_table_lookup(dup_check_table, key)) {
3173 return;
3174 }
3175 }
3176
3177 // strict check
3178 // make unique key from pc and me->def pointer
3179 key = 0;
3180 for (int i=0; i<SIZEOF_VALUE; i++) {
3181 // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3182 key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3183 }
3184
3185 if (0) {
3186 fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3187 fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3188 fprintf(stderr, "key:%p\n", (void *)key);
3189 }
3190
3191 // duplication check
3192 if (set_insert(dup_check_table, key)) {
3193 // already shown
3194 }
3195 else if (RTEST(ruby_verbose) || strict_unused_block) {
3196 VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3197 VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3198
3199 if (!NIL_P(m_loc)) {
3200 rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3201 name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3202 }
3203 else {
3204 rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3205 }
3206 }
3207}
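
/* Editor's note (summary, not part of the upstream source): in strict
 * mode the dedup key XORs the bytes of the call-site pc with the
 * byte-reversed cme->def pointer, folding both pointers into one
 * set_table key so that each (call site, method) pair warns at most
 * once. */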
3208
3209static inline int
3210vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3211 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3212{
3213 const struct rb_callinfo *ci = calling->cd->ci;
3214 const struct rb_callcache *cc = calling->cc;
3215
3216 VM_ASSERT((vm_ci_argc(ci), 1));
3217 VM_ASSERT(vm_cc_cme(cc) != NULL);
3218
3219 if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3220 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3221 !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3222 warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3223 }
3224
3225 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3226 if (LIKELY(rb_simple_iseq_p(iseq))) {
3227 rb_control_frame_t *cfp = ec->cfp;
3228 int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3229 CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3230
3231 if (calling->argc != lead_num) {
3232 argument_arity_error(ec, iseq, vm_cc_cme(cc), calling->argc, lead_num, lead_num);
3233 }
3234
3235 //VM_ASSERT(ci == calling->cd->ci);
3236 VM_ASSERT(cc == calling->cc);
3237
3238 if (vm_call_iseq_optimizable_p(ci, cc)) {
3239 if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3240 !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3241 VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3242 vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3243 CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3244 }
3245 else {
3246 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3247 }
3248 }
3249 return 0;
3250 }
3251 else if (rb_iseq_only_optparam_p(iseq)) {
3252 rb_control_frame_t *cfp = ec->cfp;
3253
3254 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3255 const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3256
3257 CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3258 const int argc = calling->argc;
3259 const int opt = argc - lead_num;
3260
3261 if (opt < 0 || opt > opt_num) {
3262 argument_arity_error(ec, iseq, vm_cc_cme(cc), argc, lead_num, lead_num + opt_num);
3263 }
3264
3265 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3266 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3267 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3268 vm_call_cacheable(ci, cc));
3269 }
3270 else {
3271 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3272 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3273 vm_call_cacheable(ci, cc));
3274 }
3275
3276 /* initialize opt vars for self-references */
3277 VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3278 for (int i=argc; i<lead_num + opt_num; i++) {
3279 argv[i] = Qnil;
3280 }
3281 return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3282 }
3283 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3284 const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3285 const int argc = calling->argc;
3286 const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3287
3288 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3289 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3290
3291 if (argc - kw_arg->keyword_len == lead_num) {
3292 const int ci_kw_len = kw_arg->keyword_len;
3293 const VALUE * const ci_keywords = kw_arg->keywords;
3294 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3295 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3296
3297 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3298 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), ci_kws, ci_kw_len, ci_keywords, klocals);
3299
3300 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3301 vm_call_cacheable(ci, cc));
3302
3303 return 0;
3304 }
3305 }
3306 else if (argc == lead_num) {
3307 /* no kwarg */
3308 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3309 args_setup_kw_parameters(ec, iseq, vm_cc_cme(cc), NULL, 0, NULL, klocals);
3310
3311 if (klocals[kw_param->num] == INT2FIX(0)) {
3312 /* copy from default_values */
3313 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3314 vm_call_cacheable(ci, cc));
3315 }
3316
3317 return 0;
3318 }
3319 }
3320 }
3321
3322 // Called iseq is using ... param
3323 // def foo(...) # <- iseq for foo will have "forwardable"
3324 //
3325 // We want to set the `...` local to the caller's CI
3326 // foo(1, 2) # <- the ci for this should end up as `...`
3327 //
3328 // So hopefully the stack looks like:
3329 //
3330 // => 1
3331 // => 2
3332 // => *
3333 // => **
3334 // => &
3335 // => ... # <- points at `foo`s CI
3336 // => cref_or_me
3337 // => specval
3338 // => type
3339 //
3340 if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3341 bool can_fastpath = true;
3342
3343 if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3344 struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3345 if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3346 ci = vm_ci_new_runtime(
3347 vm_ci_mid(ci),
3348 vm_ci_flag(ci),
3349 vm_ci_argc(ci),
3350 vm_ci_kwarg(ci));
3351 }
3352 else {
3353 ci = forward_cd->caller_ci;
3354 }
3355 can_fastpath = false;
3356 }
3357    // C functions calling iseqs will stack-allocate a CI,
3358    // so we need to convert it to a heap-allocated one.
3359 if (!vm_ci_markable(ci)) {
3360 ci = vm_ci_new_runtime(
3361 vm_ci_mid(ci),
3362 vm_ci_flag(ci),
3363 vm_ci_argc(ci),
3364 vm_ci_kwarg(ci));
3365 can_fastpath = false;
3366 }
3367 argv[param_size - 1] = (VALUE)ci;
3368 CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3369 return 0;
3370 }
3371
3372 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3373}
3374
3375static void
3376vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3377{
3378 // This case is when the caller is using a ... parameter.
3379 // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3380 // In this case the caller's caller's CI will be on the stack.
3381 //
3382 // For example:
3383 //
3384 // def bar(a, b); a + b; end
3385 // def foo(...); bar(...); end
3386 // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3387 //
3388 // Stack layout will be:
3389 //
3390 // > 1
3391 // > 2
3392 // > CI for foo(1, 2)
3393 // > cref_or_me
3394 // > specval
3395 // > type
3396 // > receiver
3397 // > CI for foo(1, 2), via `getlocal ...`
3398 // > ( SP points here )
3399 const VALUE * lep = VM_CF_LEP(cfp);
3400
3401 const rb_iseq_t *iseq;
3402
3403 // If we're in an escaped environment (lambda for example), get the iseq
3404 // from the captured env.
3405 if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3406 rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3407 iseq = env->iseq;
3408 }
3409 else { // Otherwise use the lep to find the caller
3410 iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3411 }
3412
3413 // Our local storage is below the args we need to copy
3414 int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3415
3416 const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3417 VALUE * to = cfp->sp - 1; // clobber the CI
3418
3419 if (RTEST(splat)) {
3420 to -= 1; // clobber the splat array
3421 CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3422 MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3423 to += RARRAY_LEN(splat);
3424 }
3425
3426 CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3427 MEMCPY(to, from, VALUE, argc);
3428 cfp->sp = to + argc;
3429
3430 // Stack layout should now be:
3431 //
3432 // > 1
3433 // > 2
3434 // > CI for foo(1, 2)
3435 // > cref_or_me
3436 // > specval
3437 // > type
3438 // > receiver
3439 // > 1
3440 // > 2
3441 // > ( SP points here )
3442}
3443
3444static VALUE
3445vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3446{
3447 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3448
3449 const struct rb_callcache *cc = calling->cc;
3450 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3451 int param_size = ISEQ_BODY(iseq)->param.size;
3452 int local_size = ISEQ_BODY(iseq)->local_table_size;
3453
3454 RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3455
3456 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3457 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3458}
3459
3460static VALUE
3461vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3462{
3463 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3464
3465 const struct rb_callcache *cc = calling->cc;
3466 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3467 int param_size = ISEQ_BODY(iseq)->param.size;
3468 int local_size = ISEQ_BODY(iseq)->local_table_size;
3469
3470 RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3471
3472 // Setting up local size and param size
3473 local_size = local_size + vm_ci_argc(calling->cd->ci);
3474 param_size = param_size + vm_ci_argc(calling->cd->ci);
3475
3476 const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3477 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3478}
3479
3480static inline VALUE
3481vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3482 int opt_pc, int param_size, int local_size)
3483{
3484 const struct rb_callinfo *ci = calling->cd->ci;
3485 const struct rb_callcache *cc = calling->cc;
3486
3487 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3488 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3489 }
3490 else {
3491 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3492 }
3493}
3494
3495static inline VALUE
3496vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3497 int opt_pc, int param_size, int local_size)
3498{
3499 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3500 VALUE *argv = cfp->sp - calling->argc;
3501 VALUE *sp = argv + param_size;
3502 cfp->sp = argv - 1 /* recv */;
3503
3504 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3505 calling->block_handler, (VALUE)me,
3506 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3507 local_size - param_size,
3508 ISEQ_BODY(iseq)->stack_max);
3509 return Qundef;
3510}
3511
3512static inline VALUE
3513vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3514{
3515 const struct rb_callcache *cc = calling->cc;
3516 unsigned int i;
3517 VALUE *argv = cfp->sp - calling->argc;
3518 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3519 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3520 VALUE *src_argv = argv;
3521 VALUE *sp_orig, *sp;
3522 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3523
3524 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3525 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3526 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3527 dst_captured->code.val = src_captured->code.val;
3528 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3529 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3530 }
3531 else {
3532 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3533 }
3534 }
3535
3536 vm_pop_frame(ec, cfp, cfp->ep);
3537 cfp = ec->cfp;
3538
3539 sp_orig = sp = cfp->sp;
3540
3541 /* push self */
3542 sp[0] = calling->recv;
3543 sp++;
3544
3545 /* copy arguments */
3546 for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3547 *sp++ = src_argv[i];
3548 }
3549
3550 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3551 calling->recv, calling->block_handler, (VALUE)me,
3552 ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3553 ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3554 ISEQ_BODY(iseq)->stack_max);
3555
3556 cfp->sp = sp_orig;
3557
3558 return Qundef;
3559}
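
/* Editor's note (summary, not part of the upstream source): a tailcall
 * pops the caller's frame first, then rebuilds the receiver and
 * arguments at the caller's stack base, so the callee replaces the
 * current frame instead of growing the control-frame stack. The FINISH
 * flag is inherited so the interpreter loop unwinds as if the replaced
 * frame had returned. */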
3560
3561static void
3562ractor_unsafe_check(void)
3563{
3564 if (!rb_ractor_main_p()) {
3565 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
3566 }
3567}
3568
3569static VALUE
3570call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3571{
3572 ractor_unsafe_check();
3573 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3574 return (*f)(recv, rb_ary_new4(argc, argv));
3575}
3576
3577static VALUE
3578call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3579{
3580 ractor_unsafe_check();
3581 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3582 return (*f)(argc, argv, recv);
3583}
3584
3585static VALUE
3586call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3587{
3588 ractor_unsafe_check();
3589 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3590 return (*f)(recv);
3591}
3592
3593static VALUE
3594call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3595{
3596 ractor_unsafe_check();
3597 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3598 return (*f)(recv, argv[0]);
3599}
3600
3601static VALUE
3602call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3603{
3604 ractor_unsafe_check();
3605 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3606 return (*f)(recv, argv[0], argv[1]);
3607}
3608
3609static VALUE
3610call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3611{
3612 ractor_unsafe_check();
3613 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3614 return (*f)(recv, argv[0], argv[1], argv[2]);
3615}
3616
3617static VALUE
3618call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3619{
3620 ractor_unsafe_check();
3621 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3622 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3623}
3624
3625static VALUE
3626call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3627{
3628 ractor_unsafe_check();
3629 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3630 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3631}
3632
3633static VALUE
3634call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3635{
3636    ractor_unsafe_check();
3637    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3638 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3639}
3640
3641static VALUE
3642call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643{
3644    ractor_unsafe_check();
3645    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3646 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3647}
3648
3649static VALUE
3650call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3651{
3652    ractor_unsafe_check();
3653    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3654 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3655}
3656
3657static VALUE
3658call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3659{
3660    ractor_unsafe_check();
3661    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3662 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3663}
3664
3665static VALUE
3666call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3667{
3668    ractor_unsafe_check();
3669    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3670 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3671}
3672
3673static VALUE
3674call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3675{
3676    ractor_unsafe_check();
3677    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3678 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3679}
3680
3681static VALUE
3682call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3683{
3684    ractor_unsafe_check();
3685    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3686 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3687}
3688
3689static VALUE
3690call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3691{
3692    ractor_unsafe_check();
3693    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3694 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3695}
3696
3697static VALUE
3698call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3699{
3700    ractor_unsafe_check();
3701    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3702 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3703}
3704
3705static VALUE
3706call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3707{
3708    ractor_unsafe_check();
3709    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3710 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3711}
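
/* Editor's note (summary, not part of the upstream source): the
 * call_cfunc_* thunks implement arity dispatch for C-defined methods. A
 * method registered with rb_define_method(klass, "m", func, 2) is
 * invoked through call_cfunc_2, which casts func back to its real
 * three-VALUE signature; arity -1 passes (argc, argv, recv) and arity -2
 * collects the arguments into a single Array. The ractor_safe_* variants
 * below are identical except that they skip the main-ractor check. */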
3712
3713static VALUE
3714ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3715{
3716 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3717 return (*f)(recv, rb_ary_new4(argc, argv));
3718}
3719
3720static VALUE
3721ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3722{
3723 VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3724 return (*f)(argc, argv, recv);
3725}
3726
3727static VALUE
3728ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3729{
3730 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3731 return (*f)(recv);
3732}
3733
3734static VALUE
3735ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3736{
3737 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3738 return (*f)(recv, argv[0]);
3739}
3740
3741static VALUE
3742ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3743{
3744 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3745 return (*f)(recv, argv[0], argv[1]);
3746}
3747
3748static VALUE
3749ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3750{
3751 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3752 return (*f)(recv, argv[0], argv[1], argv[2]);
3753}
3754
3755static VALUE
3756ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3757{
3758 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3759 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3760}
3761
3762static VALUE
3763ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3764{
3765 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3766 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3767}
3768
3769static VALUE
3770ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3771{
3772    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3773 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3774}
3775
3776static VALUE
3777ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3778{
3779    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3780 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3781}
3782
3783static VALUE
3784ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3785{
3786    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3787 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3788}
3789
3790static VALUE
3791ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3792{
3793    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3794 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3795}
3796
3797static VALUE
3798ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3799{
3800    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3801 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3802}
3803
3804static VALUE
3805ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3806{
3807    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3808 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3809}
3810
3811static VALUE
3812ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3813{
3814    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3815    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3816}
3817
3818static VALUE
3819ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3820{
3821    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3822    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3823}
3824
3825static VALUE
3826ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3827{
3828    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3829    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3830}
3831
3832static VALUE
3833ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3834{
3835    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3836    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3837}
3838
3839static inline int
3840vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3841{
3842 const int ov_flags = RAISED_STACKOVERFLOW;
3843 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3844 if (rb_ec_raised_p(ec, ov_flags)) {
3845 rb_ec_raised_reset(ec, ov_flags);
3846 return TRUE;
3847 }
3848 return FALSE;
3849}
3850
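// After a cfunc returns, the caller's frame must satisfy reg_cfp == ec->cfp + 1;
// the only tolerated mismatch is a pending stack-overflow unwind, which
// vm_cfp_consistent_p() detects via RAISED_STACKOVERFLOW and resets.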
3851#define CHECK_CFP_CONSISTENCY(func) \
3852 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3853 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3854
3855static inline
3856const rb_method_cfunc_t *
3857vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3858{
3859#if VM_DEBUG_VERIFY_METHOD_CACHE
3860 switch (me->def->type) {
3861 case VM_METHOD_TYPE_CFUNC:
3862 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3863 break;
3864# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3865 METHOD_BUG(ISEQ);
3866 METHOD_BUG(ATTRSET);
3867 METHOD_BUG(IVAR);
3868 METHOD_BUG(BMETHOD);
3869 METHOD_BUG(ZSUPER);
3870 METHOD_BUG(UNDEF);
3871 METHOD_BUG(OPTIMIZED);
3872 METHOD_BUG(MISSING);
3873 METHOD_BUG(REFINED);
3874 METHOD_BUG(ALIAS);
3875# undef METHOD_BUG
3876 default:
3877 rb_bug("wrong method type: %d", me->def->type);
3878 }
3879#endif
3880 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3881}
3882
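// Core cfunc invocation: push a CFUNC frame, fire c-call hooks, check arity for
// fixed-arity methods, call the C function through its arity-specific invoker
// trampoline, verify frame consistency, pop the frame, and fire c-return hooks.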
3883static VALUE
3884vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3885 int argc, VALUE *argv, VALUE *stack_bottom)
3886{
3887 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3888 const struct rb_callinfo *ci = calling->cd->ci;
3889 const struct rb_callcache *cc = calling->cc;
3890 VALUE val;
3891 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3892 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3893
3894 VALUE recv = calling->recv;
3895 VALUE block_handler = calling->block_handler;
3896 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3897
3898 if (UNLIKELY(calling->kw_splat)) {
3899 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3900 }
3901
3902 VM_ASSERT(reg_cfp == ec->cfp);
3903
3904 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3905 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3906
3907 vm_push_frame(ec, NULL, frame_type, recv,
3908 block_handler, (VALUE)me,
3909 0, ec->cfp->sp, 0, 0);
3910
3911 int len = cfunc->argc;
3912 if (len >= 0) rb_check_arity(argc, len, len);
3913
3914 reg_cfp->sp = stack_bottom;
3915 val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3916
3917 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3918
3919 rb_vm_pop_frame(ec);
3920
3921 VM_ASSERT(ec->cfp->sp == stack_bottom);
3922
3923 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3924 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3925
3926 return val;
3927}
3928
3929// Push a C method frame for a given cme. This is called when JIT code skipped
3930// pushing a frame but the C method reached a point where a frame is needed.
3931void
3932rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3933{
3934 VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3935 rb_execution_context_t *ec = GET_EC();
3936 VALUE *sp = ec->cfp->sp;
3937 VALUE recv = *(sp - recv_idx - 1);
3938 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3939 VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3940#if VM_CHECK_MODE > 0
3941 // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3942 *(GET_EC()->cfp->sp) = Qfalse;
3943#endif
3944 vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3945}
3946
3947// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3948bool
3949rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3950{
3951 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3952}
3953
3954static VALUE
3955vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3956{
3957 int argc = calling->argc;
3958 VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3959 VALUE *argv = &stack_bottom[1];
3960
3961 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3962}
3963
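// Generic path for cfunc calls whose arguments need caller-side setup (splats,
// keywords). Oversized splats stay in a hidden "heap argv" Array and are passed
// via its internal buffer; simple call shapes are re-bound to the
// vm_call_cfunc_with_frame fastpath.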
3964static VALUE
3965vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3966{
3967 const struct rb_callinfo *ci = calling->cd->ci;
3968 RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3969
3970 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3971 VALUE argv_ary;
3972 if (UNLIKELY(argv_ary = calling->heap_argv)) {
3973 VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3974 int argc = RARRAY_LENINT(argv_ary);
3975 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3976 VALUE *stack_bottom = reg_cfp->sp - 2;
3977
3978 VM_ASSERT(calling->argc == 1);
3979 VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3980 VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3981
3982 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3983 }
3984 else {
3985 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3986
3987 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3988 }
3989}
3990
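// Spread a splatted argument Array directly onto the VM stack so the ordinary
// cfunc path can run; falls back to vm_call_cfunc_other() when the Array would
// exceed the stack argument limit (VM_ARGC_STACK_MAX).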
3991static inline VALUE
3992vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3993{
3994 VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3995 int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3996
3997 if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3998 return vm_call_cfunc_other(ec, reg_cfp, calling);
3999 }
4000
4001 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4002 calling->kw_splat = 0;
4003 int i;
4004 VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
4005 VALUE *sp = stack_bottom;
4006 CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
4007    for (i = 0; i < argc; i++) {
4008 *++sp = argv[i];
4009 }
4010 reg_cfp->sp = sp+1;
4011
4012 return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
4013}
4014
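// Fastpath for `f(*a)`: reuse the splat Array's elements in place. A trailing
// empty hash flagged RHASH_PASS_AS_KEYWORDS is dropped; a non-empty one needs
// real keyword processing and takes the generic path.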
4015static inline VALUE
4016vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4017{
4018 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
4019 VALUE argv_ary = reg_cfp->sp[-1];
4020 int argc = RARRAY_LENINT(argv_ary);
4021 VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
4022 VALUE last_hash;
4023 int argc_offset = 0;
4024
4025 if (UNLIKELY(argc > 0 &&
4026 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
4027 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
4028 if (!RHASH_EMPTY_P(last_hash)) {
4029 return vm_call_cfunc_other(ec, reg_cfp, calling);
4030 }
4031 argc_offset++;
4032 }
4033 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
4034}
4035
4036static inline VALUE
4037vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4038{
4039 RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
4040 VALUE keyword_hash = reg_cfp->sp[-1];
4041
4042 if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
4043 return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
4044 }
4045
4046 return vm_call_cfunc_other(ec, reg_cfp, calling);
4047}
4048
4049static VALUE
4050vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4051{
4052 const struct rb_callinfo *ci = calling->cd->ci;
4053 RB_DEBUG_COUNTER_INC(ccf_cfunc);
4054
4055 if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4056 if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
4057 // f(*a)
4058 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
4059 return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
4060 }
4061 if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
4062 // f(*a, **kw)
4063 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
4064 return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
4065 }
4066 }
4067
4068 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
4069 return vm_call_cfunc_other(ec, reg_cfp, calling);
4070}
4071
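// Fastpath for attr_reader-defined methods: pop the receiver and read the
// instance variable through the attribute's cached shape/index.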
4072static VALUE
4073vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4074{
4075 const struct rb_callcache *cc = calling->cc;
4076 RB_DEBUG_COUNTER_INC(ccf_ivar);
4077 cfp->sp -= 1;
4078 VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
4079 return ivar;
4080}
4081
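// Fastpath for attr_writer-defined methods: pop the value and receiver, then
// store the instance variable using the cached shape/index, falling back to
// type-specific setters and finally the slowpath on a cache miss.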
4082static VALUE
4083vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
4084{
4085 RB_DEBUG_COUNTER_INC(ccf_attrset);
4086 VALUE val = *(cfp->sp - 1);
4087 cfp->sp -= 2;
4088 attr_index_t index;
4089 shape_id_t dest_shape_id;
4090 vm_cc_atomic_shape_and_index(cc, &dest_shape_id, &index);
4091 ID id = vm_cc_cme(cc)->def->body.attr.id;
4092 rb_check_frozen(obj);
4093 VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
4094 if (UNDEF_P(res)) {
4095 switch (BUILTIN_TYPE(obj)) {
4096 case T_OBJECT:
4097 break;
4098 case T_CLASS:
4099 case T_MODULE:
4100 {
4101 res = vm_setivar_class(obj, id, val, dest_shape_id, index);
4102 if (!UNDEF_P(res)) {
4103 return res;
4104 }
4105 }
4106 break;
4107 default:
4108 {
4109 res = vm_setivar_default(obj, id, val, dest_shape_id, index);
4110 if (!UNDEF_P(res)) {
4111 return res;
4112 }
4113 }
4114 }
4115 res = vm_setivar_slowpath_attr(obj, id, val, cc);
4116 }
4117 return res;
4118}
4119
4120static VALUE
4121vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4122{
4123 return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
4124}
4125
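// Invoke a method defined with define_method (a "bmethod") by calling the Proc
// it wraps; an unshareable Proc may only be invoked from the Ractor that
// defined it.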
4126static inline VALUE
4127vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4128{
4129 rb_proc_t *proc;
4130 VALUE val;
4131 const struct rb_callcache *cc = calling->cc;
4132 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4133 VALUE procv = cme->def->body.bmethod.proc;
4134
4135 if (!RB_OBJ_SHAREABLE_P(procv) &&
4136 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4137 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4138 }
4139
4140 /* control block frame */
4141 GetProcPtr(procv, proc);
4142 val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4143
4144 return val;
4145}
4146
4147static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4148
4149static VALUE
4150vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4151{
4152 RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4153
4154 const struct rb_callcache *cc = calling->cc;
4155 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4156 VALUE procv = cme->def->body.bmethod.proc;
4157
4158 if (!RB_OBJ_SHAREABLE_P(procv) &&
4159 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4160 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4161 }
4162
4163 rb_proc_t *proc;
4164 GetProcPtr(procv, proc);
4165 const struct rb_block *block = &proc->block;
4166
4167 while (vm_block_type(block) == block_type_proc) {
4168 block = vm_proc_block(block->as.proc);
4169 }
4170 VM_ASSERT(vm_block_type(block) == block_type_iseq);
4171
4172 const struct rb_captured_block *captured = &block->as.captured;
4173 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4174 VALUE * const argv = cfp->sp - calling->argc;
4175 const int arg_size = ISEQ_BODY(iseq)->param.size;
4176
4177 int opt_pc;
4178 if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4179 opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4180 }
4181 else {
4182 opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4183 }
4184
4185 cfp->sp = argv - 1; // -1 for the receiver
4186
4187 vm_push_frame(ec, iseq,
4188 VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4189 calling->recv,
4190 VM_GUARDED_PREV_EP(captured->ep),
4191 (VALUE)cme,
4192 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4193 argv + arg_size,
4194 ISEQ_BODY(iseq)->local_table_size - arg_size,
4195 ISEQ_BODY(iseq)->stack_max);
4196
4197 return Qundef;
4198}
4199
4200static VALUE
4201vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4202{
4203 RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4204
4205 VALUE *argv;
4206 int argc;
4207 CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4208 if (UNLIKELY(calling->heap_argv)) {
4209 argv = RARRAY_PTR(calling->heap_argv);
4210 cfp->sp -= 2;
4211 }
4212 else {
4213 argc = calling->argc;
4214 argv = ALLOCA_N(VALUE, argc);
4215 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4216 cfp->sp += - argc - 1;
4217 }
4218
4219 return vm_call_bmethod_body(ec, calling, argv);
4220}
4221
4222static VALUE
4223vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4224{
4225 RB_DEBUG_COUNTER_INC(ccf_bmethod);
4226
4227 const struct rb_callcache *cc = calling->cc;
4228 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4229 VALUE procv = cme->def->body.bmethod.proc;
4230 rb_proc_t *proc;
4231 GetProcPtr(procv, proc);
4232 const struct rb_block *block = &proc->block;
4233
4234 while (vm_block_type(block) == block_type_proc) {
4235 block = vm_proc_block(block->as.proc);
4236 }
4237 if (vm_block_type(block) == block_type_iseq) {
4238 CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4239 return vm_call_iseq_bmethod(ec, cfp, calling);
4240 }
4241
4242 CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4243 return vm_call_noniseq_bmethod(ec, cfp, calling);
4244}
4245
4246VALUE
4247rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4248{
4249 VALUE klass = current_class;
4250
4251    /* for a prepended Module, start from the covering class */
4252 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
4253 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4254 klass = RBASIC_CLASS(klass);
4255 }
4256
4257 while (RTEST(klass)) {
4258 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4259 if (owner == target_owner) {
4260 return klass;
4261 }
4262 klass = RCLASS_SUPER(klass);
4263 }
4264
4265 return current_class; /* maybe module function */
4266}
4267
4268static const rb_callable_method_entry_t *
4269aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4270{
4271 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4272 const rb_callable_method_entry_t *cme;
4273
4274 if (orig_me->defined_class == 0) {
4275 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4276 VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4277 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4278
4279 if (me->def->reference_count == 1) {
4280 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4281 }
4282 else {
4283            rb_method_definition_t *def =
4284                rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4285 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4286 }
4287 }
4288 else {
4289 cme = (const rb_callable_method_entry_t *)orig_me;
4290 }
4291
4292 VM_ASSERT(callable_method_entry_p(cme));
4293 return cme;
4294}
4295
4296const rb_callable_method_entry_t *
4297rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4298{
4299 return aliased_callable_method_entry(me);
4300}
4301
4302static VALUE
4303vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4304{
4305 calling->cc = &VM_CC_ON_STACK(Qundef,
4306 vm_call_general,
4307 {{0}},
4308 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4309
4310 return vm_call_method_each_type(ec, cfp, calling);
4311}
4312
4313static enum method_missing_reason
4314ci_missing_reason(const struct rb_callinfo *ci)
4315{
4316 enum method_missing_reason stat = MISSING_NOENTRY;
4317 if (vm_ci_flag(ci) & VM_CALL_VCALL && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) stat |= MISSING_VCALL;
4318 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4319 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4320 return stat;
4321}
4322
4323static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4324
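// Dispatch a call whose method name is given as a Symbol or String (e.g. via
// send). If the name is not an interned ID and method_missing is not
// overridden, NoMethodError is raised without interning it, so that send with
// an arbitrary string cannot create Symbols ([Feature #5112]).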
4325static VALUE
4326vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4327 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4328{
4329 ASSUME(calling->argc >= 0);
4330
4331 enum method_missing_reason missing_reason = MISSING_NOENTRY;
4332 int argc = calling->argc;
4333 VALUE recv = calling->recv;
4334 VALUE klass = CLASS_OF(recv);
4335 ID mid = rb_check_id(&symbol);
4336 flags |= VM_CALL_OPT_SEND;
4337
4338 if (UNLIKELY(! mid)) {
4339 mid = idMethodMissing;
4340 missing_reason = ci_missing_reason(ci);
4341 ec->method_missing_reason = missing_reason;
4342
4343 VALUE argv_ary;
4344 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4345 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4346 rb_ary_unshift(argv_ary, symbol);
4347
4348 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4349 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4350 VALUE exc = rb_make_no_method_exception(
4351 rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4352
4353 rb_exc_raise(exc);
4354 }
4355 rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4356 }
4357 else {
4358 /* E.g. when argc == 2
4359 *
4360 * | | | | TOPN
4361 * | | +------+
4362 * | | +---> | arg1 | 0
4363 * +------+ | +------+
4364 * | arg1 | -+ +-> | arg0 | 1
4365 * +------+ | +------+
4366 * | arg0 | ---+ | sym | 2
4367 * +------+ +------+
4368 * | recv | | recv | 3
4369 * --+------+--------+------+------
4370 */
4371 int i = argc;
4372 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4373 INC_SP(1);
4374 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4375 argc = ++calling->argc;
4376
4377 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4378 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4379 TOPN(i) = symbol;
4380 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4381 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4382 VALUE exc = rb_make_no_method_exception(
4383 rb_eNoMethodError, 0, recv, argc, argv, priv);
4384
4385 rb_exc_raise(exc);
4386 }
4387 else {
4388 TOPN(i) = rb_str_intern(symbol);
4389 }
4390 }
4391 }
4392
4393 struct rb_forwarding_call_data new_fcd = {
4394 .cd = {
4395 .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4396 .cc = NULL,
4397 },
4398 .caller_ci = NULL,
4399 };
4400
4401 if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4402 calling->cd = &new_fcd.cd;
4403 }
4404 else {
4405 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4406 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4407 new_fcd.caller_ci = caller_ci;
4408 calling->cd = (struct rb_call_data *)&new_fcd;
4409 }
4410 calling->cc = &VM_CC_ON_STACK(klass,
4411 vm_call_general,
4412 { .method_missing_reason = missing_reason },
4413 rb_callable_method_entry_with_refinements(klass, mid, NULL));
4414
4415 if (flags & VM_CALL_FCALL) {
4416 return vm_call_method(ec, reg_cfp, calling);
4417 }
4418
4419 const struct rb_callcache *cc = calling->cc;
4420 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4421
4422 if (vm_cc_cme(cc) != NULL) {
4423 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4424 case METHOD_VISI_PUBLIC: /* likely */
4425 return vm_call_method_each_type(ec, reg_cfp, calling);
4426 case METHOD_VISI_PRIVATE:
4427 vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4428 break;
4429 case METHOD_VISI_PROTECTED:
4430 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4431 break;
4432 default:
4433 VM_UNREACHABLE(vm_call_method);
4434 }
4435 return vm_call_method_missing(ec, reg_cfp, calling);
4436 }
4437
4438 return vm_call_method_nome(ec, reg_cfp, calling);
4439}
4440
4441static VALUE
4442vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4443{
4444 const struct rb_callinfo *ci = calling->cd->ci;
4445 int i;
4446 VALUE sym;
4447
4448 i = calling->argc - 1;
4449
4450 if (calling->argc == 0) {
4451 rb_raise(rb_eArgError, "no method name given");
4452 }
4453
4454 sym = TOPN(i);
4455 /* E.g. when i == 2
4456 *
4457 * | | | | TOPN
4458 * +------+ | |
4459 * | arg1 | ---+ | | 0
4460 * +------+ | +------+
4461 * | arg0 | -+ +-> | arg1 | 1
4462 * +------+ | +------+
4463 * | sym | +---> | arg0 | 2
4464 * +------+ +------+
4465 * | recv | | recv | 3
4466 * --+------+--------+------+------
4467 */
4468 /* shift arguments */
4469 if (i > 0) {
4470 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4471 }
4472 calling->argc -= 1;
4473 DEC_SP(1);
4474
4475 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4476}
4477
4478static VALUE
4479vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4480{
4481 RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4482 const struct rb_callinfo *ci = calling->cd->ci;
4483 int flags = VM_CALL_FCALL;
4484 VALUE sym;
4485
4486 VALUE argv_ary;
4487 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4488 if (UNLIKELY(argv_ary = calling->heap_argv)) {
4489 sym = rb_ary_shift(argv_ary);
4490 flags |= VM_CALL_ARGS_SPLAT;
4491 if (calling->kw_splat) {
4492 VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4493 ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4494 calling->kw_splat = 0;
4495 }
4496 return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4497 }
4498
4499 if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4500 return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4501}
4502
4503static VALUE
4504vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4505{
4506 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4507 return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4508}
4509
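// Entry point for optimized `send`: argument shapes that cannot be shifted in
// place (forwarding, lone splats, kwargs consuming every argument) are
// flattened by the complex variant; everything else shifts the method-name
// argument off the stack directly.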
4510static VALUE
4511vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4512{
4513 RB_DEBUG_COUNTER_INC(ccf_opt_send);
4514
4515 const struct rb_callinfo *ci = calling->cd->ci;
4516 int flags = vm_ci_flag(ci);
4517
4518 if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4519 ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4520 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4521 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4522 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4523 return vm_call_opt_send_complex(ec, reg_cfp, calling);
4524 }
4525
4526 CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4527 return vm_call_opt_send_simple(ec, reg_cfp, calling);
4528}
4529
4530static VALUE
4531vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4532 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4533{
4534 RB_DEBUG_COUNTER_INC(ccf_method_missing);
4535
4536 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4537 unsigned int argc, flag;
4538
4539 flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4540 argc = ++calling->argc;
4541
4542 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4543 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4544 vm_check_canary(ec, reg_cfp->sp);
4545 if (argc > 1) {
4546 MEMMOVE(argv+1, argv, VALUE, argc-1);
4547 }
4548 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4549 INC_SP(1);
4550
4551 ec->method_missing_reason = reason;
4552
4553 struct rb_forwarding_call_data new_fcd = {
4554 .cd = {
4555 .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4556 .cc = NULL,
4557 },
4558 .caller_ci = NULL,
4559 };
4560
4561 if (!(flag & VM_CALL_FORWARDING)) {
4562 calling->cd = &new_fcd.cd;
4563 }
4564 else {
4565 const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4566 VM_ASSERT((vm_ci_argc(caller_ci), 1));
4567 new_fcd.caller_ci = caller_ci;
4568 calling->cd = (struct rb_call_data *)&new_fcd;
4569 }
4570
4571 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4572 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4573 return vm_call_method(ec, reg_cfp, calling);
4574}
4575
4576static VALUE
4577vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4578{
4579 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4580}
4581
4582static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4583static VALUE
4584vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4585{
4586 klass = RCLASS_SUPER(klass);
4587
4588 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4589 if (cme == NULL) {
4590 return vm_call_method_nome(ec, cfp, calling);
4591 }
4592 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4593 cme->def->body.refined.orig_me) {
4594 cme = refined_method_callable_without_refinement(cme);
4595 }
4596
4597 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4598
4599 return vm_call_method_each_type(ec, cfp, calling);
4600}
4601
4602static inline VALUE
4603find_refinement(VALUE refinements, VALUE klass)
4604{
4605 if (NIL_P(refinements)) {
4606 return Qnil;
4607 }
4608 return rb_hash_lookup(refinements, klass);
4609}
4610
4611PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4612static rb_control_frame_t *
4613current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4614{
4615 rb_control_frame_t *top_cfp = cfp;
4616
4617 if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4618 const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4619
4620 do {
4621 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4622 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4623 /* TODO: orphan block */
4624 return top_cfp;
4625 }
4626 } while (cfp->iseq != local_iseq);
4627 }
4628 return cfp;
4629}
4630
4631static const rb_callable_method_entry_t *
4632refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4633{
4634 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4635 const rb_callable_method_entry_t *cme;
4636
4637 if (orig_me->defined_class == 0) {
4638        cme = NULL;
4639        rb_notimplement();
4640    }
4641 else {
4642 cme = (const rb_callable_method_entry_t *)orig_me;
4643 }
4644
4645 VM_ASSERT(callable_method_entry_p(cme));
4646
4647 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4648 cme = NULL;
4649 }
4650
4651 return cme;
4652}
4653
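// Find the method a refined call should actually invoke: walk the caller's
// cref chain, look up each activated refinement of the method's owner, and
// prefer the refined definition unless super is currently escaping from it.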
4654static const rb_callable_method_entry_t *
4655search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4656{
4657 ID mid = vm_ci_mid(calling->cd->ci);
4658 const rb_cref_t *cref = vm_get_cref(cfp->ep);
4659 const struct rb_callcache * const cc = calling->cc;
4660 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4661
4662 for (; cref; cref = CREF_NEXT(cref)) {
4663 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4664 if (NIL_P(refinement)) continue;
4665
4666 const rb_callable_method_entry_t *const ref_me =
4667 rb_callable_method_entry(refinement, mid);
4668
4669 if (ref_me) {
4670 if (vm_cc_call(cc) == vm_call_super_method) {
4671 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4672 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4673 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4674 continue;
4675 }
4676 }
4677
4678 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4679 cme->def != ref_me->def) {
4680 cme = ref_me;
4681 }
4682 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4683 return cme;
4684 }
4685 }
4686 else {
4687 return NULL;
4688 }
4689 }
4690
4691 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4692 return refined_method_callable_without_refinement(vm_cc_cme(cc));
4693 }
4694 else {
4695 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4696 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4697 return cme;
4698 }
4699}
4700
4701static VALUE
4702vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4703{
4704 const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4705
4706 if (ref_cme) {
4707 if (calling->cd->cc) {
4708 const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4709 RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4710 return vm_call_method(ec, cfp, calling);
4711 }
4712 else {
4713 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4714            calling->cc = ref_cc;
4715 return vm_call_method(ec, cfp, calling);
4716 }
4717 }
4718 else {
4719 return vm_call_method_nome(ec, cfp, calling);
4720 }
4721}
4722
4723static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4724
4725NOINLINE(static VALUE
4726 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4727 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4728
4729static VALUE
4730vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4731 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4732{
4733 int argc = calling->argc;
4734
4735 /* remove self */
4736 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4737 DEC_SP(1);
4738
4739 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4740}
4741
4742static VALUE
4743vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4744{
4745 RB_DEBUG_COUNTER_INC(ccf_opt_call);
4746
4747 const struct rb_callinfo *ci = calling->cd->ci;
4748 VALUE procval = calling->recv;
4749 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4750}
4751
4752static VALUE
4753vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4754{
4755 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4756
4757 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4758 const struct rb_callinfo *ci = calling->cd->ci;
4759
4760 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4761 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4762 }
4763 else {
4764 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4765 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4766 return vm_call_general(ec, reg_cfp, calling);
4767 }
4768}
4769
4770static VALUE
4771vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4772{
4773 VALUE recv = calling->recv;
4774
4775 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4776 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4777 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4778
4779 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4780 return internal_RSTRUCT_GET(recv, off);
4781}
4782
4783static VALUE
4784vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4785{
4786 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4787
4788 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4789 reg_cfp->sp -= 1;
4790 return ret;
4791}
4792
4793static VALUE
4794vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4795{
4796 VALUE recv = calling->recv;
4797
4798 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4799 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4800 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4801
4802 rb_check_frozen(recv);
4803
4804 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4805 internal_RSTRUCT_SET(recv, off, val);
4806
4807 return val;
4808}
4809
4810static VALUE
4811vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4812{
4813 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4814
4815 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4816 reg_cfp->sp -= 2;
4817 return ret;
4818}
4819
4820NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4821 const struct rb_callinfo *ci, const struct rb_callcache *cc));
4822
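// Wrap an inlined attribute access (ivar read/write, Struct aref/aset) with
// c-call/c-return events only when such events are enabled; otherwise run the
// nohook expression, which typically installs a fastpath.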
4823#define VM_CALL_METHOD_ATTR(var, func, nohook) \
4824 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4825 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4826 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4827 var = func; \
4828 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4829 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4830 } \
4831 else { \
4832 nohook; \
4833 var = func; \
4834 }
4835
4836static VALUE
4837vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4838 const struct rb_callinfo *ci, const struct rb_callcache *cc)
4839{
4840 switch (vm_cc_cme(cc)->def->body.optimized.type) {
4841 case OPTIMIZED_METHOD_TYPE_SEND:
4842 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4843 return vm_call_opt_send(ec, cfp, calling);
4844 case OPTIMIZED_METHOD_TYPE_CALL:
4845 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4846 return vm_call_opt_call(ec, cfp, calling);
4847 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4848 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4849 return vm_call_opt_block_call(ec, cfp, calling);
4850 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4851 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4852 rb_check_arity(calling->argc, 0, 0);
4853
4854 VALUE v;
4855 VM_CALL_METHOD_ATTR(v,
4856 vm_call_opt_struct_aref(ec, cfp, calling),
4857 set_vm_cc_ivar(cc); \
4858 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4859 return v;
4860 }
4861 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4862 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4863 rb_check_arity(calling->argc, 1, 1);
4864
4865 VALUE v;
4866 VM_CALL_METHOD_ATTR(v,
4867 vm_call_opt_struct_aset(ec, cfp, calling),
4868 set_vm_cc_ivar(cc); \
4869 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4870 return v;
4871 }
4872 default:
4873 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4874 }
4875}
4876
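// Dispatch on the method definition type, installing the matching handler as
// the call-cache fastpath so subsequent calls skip this switch.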
4877static VALUE
4878vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4879{
4880 const struct rb_callinfo *ci = calling->cd->ci;
4881 const struct rb_callcache *cc = calling->cc;
4882 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4883 VALUE v;
4884
4885 VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4886
4887 switch (cme->def->type) {
4888 case VM_METHOD_TYPE_ISEQ:
4889 if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4890 CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4891 return vm_call_iseq_fwd_setup(ec, cfp, calling);
4892 }
4893 else {
4894 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4895 return vm_call_iseq_setup(ec, cfp, calling);
4896 }
4897
4898 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4899 case VM_METHOD_TYPE_CFUNC:
4900 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4901 return vm_call_cfunc(ec, cfp, calling);
4902
4903 case VM_METHOD_TYPE_ATTRSET:
4904 CALLER_SETUP_ARG(cfp, calling, ci, 1);
4905
4906 rb_check_arity(calling->argc, 1, 1);
4907
4908 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4909
4910 if (vm_cc_markable(cc)) {
4911 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4912 VM_CALL_METHOD_ATTR(v,
4913 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4914 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4915 }
4916 else {
4917 cc = &((struct rb_callcache) {
4918 .flags = T_IMEMO |
4919 (imemo_callcache << FL_USHIFT) |
4920 VM_CALLCACHE_UNMARKABLE |
4921 VM_CALLCACHE_ON_STACK,
4922 .klass = cc->klass,
4923 .cme_ = cc->cme_,
4924 .call_ = cc->call_,
4925 .aux_ = {
4926 .attr = {
4927 .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
4928 }
4929 },
4930 });
4931
4932 VM_CALL_METHOD_ATTR(v,
4933 vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4934 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4935 }
4936 return v;
4937
4938 case VM_METHOD_TYPE_IVAR:
4939 CALLER_SETUP_ARG(cfp, calling, ci, 0);
4940 rb_check_arity(calling->argc, 0, 0);
4941 vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4942 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4943 VM_CALL_METHOD_ATTR(v,
4944 vm_call_ivar(ec, cfp, calling),
4945 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4946 return v;
4947
4948 case VM_METHOD_TYPE_MISSING:
4949 vm_cc_method_missing_reason_set(cc, 0);
4950 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4951 return vm_call_method_missing(ec, cfp, calling);
4952
4953 case VM_METHOD_TYPE_BMETHOD:
4954 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4955 return vm_call_bmethod(ec, cfp, calling);
4956
4957 case VM_METHOD_TYPE_ALIAS:
4958 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4959 return vm_call_alias(ec, cfp, calling);
4960
4961 case VM_METHOD_TYPE_OPTIMIZED:
4962 return vm_call_optimized(ec, cfp, calling, ci, cc);
4963
4964 case VM_METHOD_TYPE_UNDEF:
4965 break;
4966
4967 case VM_METHOD_TYPE_ZSUPER:
4968 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4969
4970 case VM_METHOD_TYPE_REFINED:
4971 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4972 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4973 return vm_call_refined(ec, cfp, calling);
4974 }
4975
4976 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4977}
4978
4979NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4980
4981static VALUE
4982vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4983{
4984 /* method missing */
4985 const struct rb_callinfo *ci = calling->cd->ci;
4986 const int stat = ci_missing_reason(ci);
4987
4988 if (vm_ci_mid(ci) == idMethodMissing) {
4989 if (UNLIKELY(calling->heap_argv)) {
4990 vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4991 }
4992 else {
4993 rb_control_frame_t *reg_cfp = cfp;
4994 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4995 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4996 }
4997 }
4998 else {
4999 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
5000 }
5001}
5002
5003/* Protected method calls and super invocations need to check that the receiver
5004 * (self for super) inherits the module on which the method is defined.
5005 * In the case of refinements, it should consider the original class, not the
5006 * refinement.
5007 */
5008static VALUE
5009vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
5010{
5011 VALUE defined_class = me->defined_class;
5012 VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
5013 return NIL_P(refined_class) ? defined_class : refined_class;
5014}
5015
5016static inline VALUE
5017vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
5018{
5019 const struct rb_callinfo *ci = calling->cd->ci;
5020 const struct rb_callcache *cc = calling->cc;
5021
5022 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
5023
5024 if (vm_cc_cme(cc) != NULL) {
5025 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
5026 case METHOD_VISI_PUBLIC: /* likely */
5027 return vm_call_method_each_type(ec, cfp, calling);
5028
5029 case METHOD_VISI_PRIVATE:
5030 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
5031 enum method_missing_reason stat = MISSING_PRIVATE;
5032 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
5033
5034 vm_cc_method_missing_reason_set(cc, stat);
5035 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
5036 return vm_call_method_missing(ec, cfp, calling);
5037 }
5038 return vm_call_method_each_type(ec, cfp, calling);
5039
5040 case METHOD_VISI_PROTECTED:
5041 if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
5042 VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
5043 if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
5044 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
5045 return vm_call_method_missing(ec, cfp, calling);
5046 }
5047 else {
5048 /* caching method info to dummy cc */
5049 VM_ASSERT(vm_cc_cme(cc) != NULL);
5050 struct rb_callcache cc_on_stack = *cc;
5051 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
5052 calling->cc = &cc_on_stack;
5053 return vm_call_method_each_type(ec, cfp, calling);
5054 }
5055 }
5056 return vm_call_method_each_type(ec, cfp, calling);
5057
5058 default:
5059 rb_bug("unreachable");
5060 }
5061 }
5062 else {
5063 return vm_call_method_nome(ec, cfp, calling);
5064 }
5065}
5066
5067static VALUE
5068vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5069{
5070 RB_DEBUG_COUNTER_INC(ccf_general);
5071 return vm_call_method(ec, reg_cfp, calling);
5072}
5073
5074void
5075rb_vm_cc_general(const struct rb_callcache *cc)
5076{
5077 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
5078 VM_ASSERT(cc != vm_cc_empty());
5079
5080 *(vm_call_handler *)&cc->call_ = vm_call_general;
5081}
5082
5083static VALUE
5084vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
5085{
5086 RB_DEBUG_COUNTER_INC(ccf_super_method);
5087
5088    // This check exists to keep this function distinct from `vm_call_general`, because some
5089    // compilers (VC, in our experience) can merge identical functions, giving them the same address.
5090    // The address of `vm_call_super_method` is compared in `search_refined_method`, so it must stay unique.
5091 if (ec == NULL) rb_bug("unreachable");
5092
5093    /* this check is required to distinguish it from other functions. */
5094 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
5095 return vm_call_method(ec, reg_cfp, calling);
5096}
5097
5098/* super */
5099
5100static inline VALUE
5101vm_search_normal_superclass(VALUE klass)
5102{
5103 if (BUILTIN_TYPE(klass) == T_ICLASS &&
5104 RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
5105 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
5106 klass = RBASIC(klass)->klass;
5107 }
5108 klass = RCLASS_ORIGIN(klass);
5109 return RCLASS_SUPER(klass);
5110}
5111
5112NORETURN(static void vm_super_outside(void));
5113
5114static void
5115vm_super_outside(void)
5116{
5117 rb_raise(rb_eNoMethodError, "super called outside of method");
5118}
5119
5120static const struct rb_callcache *
5121empty_cc_for_super(void)
5122{
5123 return &vm_empty_cc_for_super;
5124}
5125
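// Resolve the method `super` should invoke: validate the calling context,
// rewrite the call info's mid to the current method's original id, then search
// from the superclass of the defining class, refusing to cache fastpaths for
// entries (refinements, attr accessors) that must be re-checked per call.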
5126static const struct rb_callcache *
5127vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5128{
5129 VALUE current_defined_class;
5130 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5131
5132 if (!me) {
5133 vm_super_outside();
5134 }
5135
5136 current_defined_class = vm_defined_class_for_protected_call(me);
5137
5138 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5139 reg_cfp->iseq != method_entry_iseqptr(me) &&
5140 !rb_obj_is_kind_of(recv, current_defined_class)) {
5141 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5142 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5143
5144 if (m) { /* not bound UnboundMethod */
5145 rb_raise(rb_eTypeError,
5146 "self has wrong type to call super in this context: "
5147 "%"PRIsVALUE" (expected %"PRIsVALUE")",
5148 rb_obj_class(recv), m);
5149 }
5150 }
5151
5152 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5153 rb_raise(rb_eRuntimeError,
5154 "implicit argument passing of super from method defined"
5155 " by define_method() is not supported."
5156 " Specify all arguments explicitly.");
5157 }
5158
5159 ID mid = me->def->original_id;
5160
5161 if (!vm_ci_markable(cd->ci)) {
5162 VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5163 }
5164 else {
5165 // update iseq. really? (TODO)
5166 cd->ci = vm_ci_new_runtime(mid,
5167 vm_ci_flag(cd->ci),
5168 vm_ci_argc(cd->ci),
5169 vm_ci_kwarg(cd->ci));
5170
5171 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5172 }
5173
5174 const struct rb_callcache *cc;
5175
5176 VALUE klass = vm_search_normal_superclass(me->defined_class);
5177
5178 if (!klass) {
5179 /* bound instance method of module */
5180 cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5181 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5182 }
5183 else {
5184 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5185 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5186
5187        // define_method can leave a cached entry for a different method id
5188        if (cached_cme == NULL) {
5189            // empty_cc_for_super is not a markable object
5190 cd->cc = empty_cc_for_super();
5191 }
5192 else if (cached_cme->called_id != mid) {
5193 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5194 if (cme) {
5195 cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5196 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5197 }
5198 else {
5199 cd->cc = cc = empty_cc_for_super();
5200 }
5201 }
5202 else {
5203 switch (cached_cme->def->type) {
5204 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5205 case VM_METHOD_TYPE_REFINED:
5206 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5207 case VM_METHOD_TYPE_ATTRSET:
5208 case VM_METHOD_TYPE_IVAR:
5209 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5210 break;
5211 default:
5212 break; // use fastpath
5213 }
5214 }
5215 }
5216
5217 VM_ASSERT((vm_cc_cme(cc), true));
5218
5219 return cc;
5220}
5221
5222/* yield */
5223
5224static inline int
5225block_proc_is_lambda(const VALUE procval)
5226{
5227 rb_proc_t *proc;
5228
5229 if (procval) {
5230 GetProcPtr(procval, proc);
5231 return proc->is_lambda;
5232 }
5233 else {
5234 return 0;
5235 }
5236}
5237
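// Yield to a C-level block (ifunc): push an IFUNC frame and call the stored C
// function as (yielded_arg, callback_arg, argc, argv, blockarg), matching the
// rb_block_call block function signature.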
5238static VALUE
5239vm_yield_with_cfunc(rb_execution_context_t *ec,
5240 const struct rb_captured_block *captured,
5241                    VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5242                    const rb_callable_method_entry_t *me)
5243{
5244 int is_lambda = FALSE; /* TODO */
5245 VALUE val, arg, blockarg;
5246 int frame_flag;
5247 const struct vm_ifunc *ifunc = captured->code.ifunc;
5248
5249 if (is_lambda) {
5250 arg = rb_ary_new4(argc, argv);
5251 }
5252 else if (argc == 0) {
5253 arg = Qnil;
5254 }
5255 else {
5256 arg = argv[0];
5257 }
5258
5259 blockarg = rb_vm_bh_to_procval(ec, block_handler);
5260
5261 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5262 if (kw_splat) {
5263 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5264 }
5265
5266 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5267 frame_flag,
5268 self,
5269 VM_GUARDED_PREV_EP(captured->ep),
5270 (VALUE)me,
5271 0, ec->cfp->sp, 0, 0);
5272 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5273 rb_vm_pop_frame(ec);
5274
5275 return val;
5276}
5277
5278VALUE
5279rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5280{
5281 return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5282}
5283
5284static VALUE
5285vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5286{
5287 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5288}
5289
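// Helpers for block-parameter auto-splat: a non-lambda block with multiple
// lead parameters that receives a single Array spreads it across those
// parameters, e.g. [[1, 2]].each { |a, b| } yields a=1, b=2.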
5290static inline int
5291vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5292{
5293 int i;
5294 long len = RARRAY_LEN(ary);
5295
5296 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5297
5298 for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5299 argv[i] = RARRAY_AREF(ary, i);
5300 }
5301
5302 return i;
5303}
5304
5305static inline VALUE
5306vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5307{
5308 VALUE ary, arg0 = argv[0];
5309 ary = rb_check_array_type(arg0);
5310#if 0
5311 argv[0] = arg0;
5312#else
5313 VM_ASSERT(argv[0] == arg0);
5314#endif
5315 return ary;
5316}
5317
5318static int
5319vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5320{
5321 if (rb_simple_iseq_p(iseq)) {
5322 rb_control_frame_t *cfp = ec->cfp;
5323 VALUE arg0;
5324
5325 CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5326
5327 if (arg_setup_type == arg_setup_block &&
5328 calling->argc == 1 &&
5329 ISEQ_BODY(iseq)->param.flags.has_lead &&
5330 !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5331 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5332 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5333 }
5334
5335 if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5336 if (arg_setup_type == arg_setup_block) {
5337 if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5338 int i;
5339 CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5340 for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5341 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5342 }
5343 else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5344 calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5345 }
5346 }
5347 else {
5348 argument_arity_error(ec, iseq, NULL, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5349 }
5350 }
5351
5352 return 0;
5353 }
5354 else {
5355 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5356 }
5357}
5358
5359static int
5360vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5361{
5362 struct rb_calling_info calling_entry, *calling;
5363
5364 calling = &calling_entry;
5365 calling->argc = argc;
5366 calling->block_handler = block_handler;
5367 calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5368 calling->recv = Qundef;
5369 calling->heap_argv = 0;
5370 calling->cc = NULL;
5371 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5372
5373 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5374}
5375
5376/* ruby iseq -> ruby block */
5377
5378static VALUE
5379vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5380 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5381 bool is_lambda, VALUE block_handler)
5382{
5383 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5384 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5385 const int arg_size = ISEQ_BODY(iseq)->param.size;
5386 VALUE * const rsp = GET_SP() - calling->argc;
5387 VALUE * const argv = rsp;
5388 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5389 int frame_flag = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
5390
5391 SET_SP(rsp);
5392
5393 vm_push_frame(ec, iseq,
5394 frame_flag,
5395 captured->self,
5396 VM_GUARDED_PREV_EP(captured->ep), 0,
5397 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5398 rsp + arg_size,
5399 ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5400
5401 return Qundef;
5402}
5403
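// Invoke a Symbol block handler, e.g. ary.map(&:upcase): the first yielded
// value becomes the receiver and the rest become arguments of a
// vm_call_symbol dispatch; complex argument shapes are flattened first.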
5404static VALUE
5405vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5406 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5407 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5408{
5409 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5410 int flags = vm_ci_flag(ci);
5411
5412 if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5413 ((calling->argc == 0) ||
5414 (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5415 (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5416 ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5417 CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5418 flags = 0;
5419 if (UNLIKELY(calling->heap_argv)) {
5420#if VM_ARGC_STACK_MAX < 0
5421 if (RARRAY_LEN(calling->heap_argv) < 1) {
5422 rb_raise(rb_eArgError, "no receiver given");
5423 }
5424#endif
5425 calling->recv = rb_ary_shift(calling->heap_argv);
5426 // Modify stack to avoid cfp consistency error
5427 reg_cfp->sp++;
5428 reg_cfp->sp[-1] = reg_cfp->sp[-2];
5429 reg_cfp->sp[-2] = calling->recv;
5430 flags |= VM_CALL_ARGS_SPLAT;
5431 }
5432 else {
5433 if (calling->argc < 1) {
5434 rb_raise(rb_eArgError, "no receiver given");
5435 }
5436 calling->recv = TOPN(--calling->argc);
5437 }
5438 if (calling->kw_splat) {
5439 flags |= VM_CALL_KW_SPLAT;
5440 }
5441 }
5442 else {
5443 if (calling->argc < 1) {
5444 rb_raise(rb_eArgError, "no receiver given");
5445 }
5446 calling->recv = TOPN(--calling->argc);
5447 }
5448
5449 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5450}
5451
5452static VALUE
5453vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5454 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5455 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5456{
5457 VALUE val;
5458 int argc;
5459 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5460 CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5461 argc = calling->argc;
5462 val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5463 POPN(argc); /* TODO: should put before C/yield? */
5464 return val;
5465}
5466
5467static VALUE
5468vm_proc_to_block_handler(VALUE procval)
5469{
5470 const struct rb_block *block = vm_proc_block(procval);
5471
5472 switch (vm_block_type(block)) {
5473 case block_type_iseq:
5474 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5475 case block_type_ifunc:
5476 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5477 case block_type_symbol:
5478 return VM_BH_FROM_SYMBOL(block->as.symbol);
5479 case block_type_proc:
5480 return VM_BH_FROM_PROC(block->as.proc);
5481 }
5482 VM_UNREACHABLE(vm_yield_with_proc);
5483 return Qundef;
5484}
5485
5486static VALUE
5487vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5488 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5489 bool is_lambda, VALUE block_handler)
5490{
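 /* A proc-type handler may wrap yet another proc; unwrap until a
  * concrete iseq/ifunc/symbol handler remains, taking lambda-ness
  * from each proc as it is peeled off. */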
5491 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5492 VALUE proc = VM_BH_TO_PROC(block_handler);
5493 is_lambda = block_proc_is_lambda(proc);
5494 block_handler = vm_proc_to_block_handler(proc);
5495 }
5496
5497 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5498}
5499
5500static inline VALUE
5501vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5502 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5503 bool is_lambda, VALUE block_handler)
5504{
5505 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5506 struct rb_calling_info *calling, const struct rb_callinfo *ci,
5507 bool is_lambda, VALUE block_handler);
5508
5509 switch (vm_block_handler_type(block_handler)) {
5510 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5511 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5512 case block_handler_type_proc: func = vm_invoke_proc_block; break;
5513 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5514 default: rb_bug("vm_invoke_block: unreachable");
5515 }
5516
5517 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5518}
5519
5520static VALUE
5521vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5522{
5523 const rb_execution_context_t *ec = GET_EC();
5524 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5525 struct rb_captured_block *captured;
5526
5527 if (cfp == 0) {
5528 rb_bug("vm_make_proc_with_iseq: unreachable");
5529 }
5530
5531 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5532 captured->code.iseq = blockiseq;
5533
5534 return rb_vm_make_proc(ec, captured, rb_cProc);
5535}
5536
5537static VALUE
5538vm_once_exec(VALUE iseq)
5539{
5540 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5541 return rb_proc_call_with_block(proc, 0, 0, Qnil);
5542}
5543
5544static VALUE
5545vm_once_clear(VALUE data)
5546{
5547 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5548 is->once.running_thread = NULL;
5549 return Qnil;
5550}
5551
5552/* defined insn */
5553
5554static bool
5555check_respond_to_missing(VALUE obj, VALUE v)
5556{
5557 VALUE args[2];
5558 VALUE r;
5559
5560 args[0] = obj; args[1] = Qfalse;
5561 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5562 if (!UNDEF_P(r) && RTEST(r)) {
5563 return true;
5564 }
5565 else {
5566 return false;
5567 }
5568}
5569
5570static bool
5571vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5572{
5573 VALUE klass;
5574 enum defined_type type = (enum defined_type)op_type;
5575
5576 switch (type) {
5577 case DEFINED_IVAR:
5578 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5579 break;
5580 case DEFINED_GVAR:
5581 return rb_gvar_defined(SYM2ID(obj));
5582 break;
5583 case DEFINED_CVAR: {
5584 const rb_cref_t *cref = vm_get_cref(GET_EP());
5585 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5586 return rb_cvar_defined(klass, SYM2ID(obj));
5587 break;
5588 }
5589 case DEFINED_CONST:
5590 case DEFINED_CONST_FROM: {
5591 bool allow_nil = type == DEFINED_CONST;
5592 klass = v;
5593 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5594 break;
5595 }
5596 case DEFINED_FUNC:
5597 klass = CLASS_OF(v);
5598 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5599 break;
5600 case DEFINED_METHOD:{
5601 VALUE klass = CLASS_OF(v);
5602 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5603
5604 if (me) {
5605 switch (METHOD_ENTRY_VISI(me)) {
5606 case METHOD_VISI_PRIVATE:
5607 break;
5608 case METHOD_VISI_PROTECTED:
5609 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5610 break;
5611 }
5612 case METHOD_VISI_PUBLIC:
5613 return true;
5614 break;
5615 default:
5616 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5617 }
5618 }
5619 else {
5620 return check_respond_to_missing(obj, v);
5621 }
5622 break;
5623 }
5624 case DEFINED_YIELD:
5625 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5626 return true;
5627 }
5628 break;
5629 case DEFINED_ZSUPER:
5630 {
5631 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5632
5633 if (me) {
5634 VALUE klass = vm_search_normal_superclass(me->defined_class);
5635 if (!klass) return false;
5636
5637 ID id = me->def->original_id;
5638
5639 return rb_method_boundp(klass, id, 0);
5640 }
5641 }
5642 break;
5643 case DEFINED_REF:
5644 return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5645 default:
5646 rb_bug("unimplemented defined? type (VM)");
5647 break;
5648 }
5649
5650 return false;
5651}
5652
5653bool
5654rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5655{
5656 return vm_defined(ec, reg_cfp, op_type, obj, v);
5657}
5658
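/* Walk `lv` levels up the chain of enclosing environments: lv == 0 is
 * the current ep, lv == 1 the immediately enclosing scope, and so on. */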
5659static const VALUE *
5660vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5661{
5662 rb_num_t i;
5663 const VALUE *ep = reg_ep;
5664 for (i = 0; i < lv; i++) {
5665 ep = GET_PREV_EP(ep);
5666 }
5667 return ep;
5668}
5669
5670static VALUE
5671vm_get_special_object(const VALUE *const reg_ep,
5672 enum vm_special_object_type type)
5673{
5674 switch (type) {
5675 case VM_SPECIAL_OBJECT_VMCORE:
5676 return rb_mRubyVMFrozenCore;
5677 case VM_SPECIAL_OBJECT_CBASE:
5678 return vm_get_cbase(reg_ep);
5679 case VM_SPECIAL_OBJECT_CONST_BASE:
5680 return vm_get_const_base(reg_ep);
5681 default:
5682 rb_bug("putspecialobject insn: unknown value_type %d", type);
5683 }
5684}
5685
5686// ZJIT implementation is using the C function
5687// and needs to call a non-static function
5688VALUE
5689rb_vm_get_special_object(const VALUE *reg_ep, enum vm_special_object_type type)
5690{
5691 return vm_get_special_object(reg_ep, type);
5692}
5693
5694static VALUE
5695vm_concat_array(VALUE ary1, VALUE ary2st)
5696{
5697 const VALUE ary2 = ary2st;
5698 VALUE tmp1 = rb_check_to_array(ary1);
5699 VALUE tmp2 = rb_check_to_array(ary2);
5700
5701 if (NIL_P(tmp1)) {
5702 tmp1 = rb_ary_new3(1, ary1);
5703 }
5704 if (tmp1 == ary1) {
5705 tmp1 = rb_ary_dup(ary1);
5706 }
5707
5708 if (NIL_P(tmp2)) {
5709 return rb_ary_push(tmp1, ary2);
5710 }
5711 else {
5712 return rb_ary_concat(tmp1, tmp2);
5713 }
5714}
5715
5716static VALUE
5717vm_concat_to_array(VALUE ary1, VALUE ary2st)
5718{
5719 /* ary1 must be a newly created array */
5720 const VALUE ary2 = ary2st;
5721
5722 if (NIL_P(ary2)) return ary1;
5723
5724 VALUE tmp2 = rb_check_to_array(ary2);
5725
5726 if (NIL_P(tmp2)) {
5727 return rb_ary_push(ary1, ary2);
5728 }
5729 else {
5730 return rb_ary_concat(ary1, tmp2);
5731 }
5732}
5733
5734// YJIT implementation is using the C function
5735// and needs to call a non-static function
5736VALUE
5737rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5738{
5739 return vm_concat_array(ary1, ary2st);
5740}
5741
5742VALUE
5743rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5744{
5745 return vm_concat_to_array(ary1, ary2st);
5746}
5747
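/* Splat helper for `*ary`: nil splats to an empty array; a truthy
 * `flag` marks contexts that may mutate the result, which therefore
 * get a fresh copy rather than a shared (possibly frozen) array. */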
5748static VALUE
5749vm_splat_array(VALUE flag, VALUE ary)
5750{
5751 if (NIL_P(ary)) {
5752 return RTEST(flag) ? rb_ary_new() : rb_cArray_empty_frozen;
5753 }
5754 VALUE tmp = rb_check_to_array(ary);
5755 if (NIL_P(tmp)) {
5756 return rb_ary_new3(1, ary);
5757 }
5758 else if (RTEST(flag)) {
5759 return rb_ary_dup(tmp);
5760 }
5761 else {
5762 return tmp;
5763 }
5764}
5765
5766// YJIT implementation is using the C function
5767// and needs to call a non-static function
5768VALUE
5769rb_vm_splat_array(VALUE flag, VALUE ary)
5770{
5771 return vm_splat_array(flag, ary);
5772}
5773
5774static VALUE
5775vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5776{
5777 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5778
5779 if (flag & VM_CHECKMATCH_ARRAY) {
5780 long i;
5781 const long n = RARRAY_LEN(pattern);
5782
5783 for (i = 0; i < n; i++) {
5784 VALUE v = RARRAY_AREF(pattern, i);
5785 VALUE c = check_match(ec, v, target, type);
5786
5787 if (RTEST(c)) {
5788 return c;
5789 }
5790 }
5791 return Qfalse;
5792 }
5793 else {
5794 return check_match(ec, pattern, target, type);
5795 }
5796}
5797
5798VALUE
5799rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5800{
5801 return vm_check_match(ec, target, pattern, flag);
5802}
5803
5804static VALUE
5805vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5806{
5807 const VALUE kw_bits = *(ep - bits);
5808
5809 if (FIXNUM_P(kw_bits)) {
5810 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5811 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5812 return Qfalse;
5813 }
5814 else {
5815 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5816 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5817 }
5818 return Qtrue;
5819}
5820
5821static void
5822vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5823{
5824 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5825 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5826 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5827 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5828
5829 switch (flag) {
5830 case RUBY_EVENT_CALL:
5831 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5832 return;
5833 case RUBY_EVENT_C_CALL:
5834 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5835 return;
5836 case RUBY_EVENT_RETURN:
5837 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5838 return;
5839 case RUBY_EVENT_C_RETURN:
5840 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5841 return;
5842 }
5843 }
5844}
5845
5846static VALUE
5847vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5848{
5849 if (!rb_const_defined_at(cbase, id)) {
5850 return 0;
5851 }
5852 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5853 return rb_public_const_get_at(cbase, id);
5854 }
5855 else {
5856 return rb_const_get_at(cbase, id);
5857 }
5858}
5859
5860static VALUE
5861vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5862{
5863 if (!RB_TYPE_P(klass, T_CLASS)) {
5864 return 0;
5865 }
5866 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5867 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5868
5869 if (tmp != super) {
5870 rb_raise(rb_eTypeError,
5871 "superclass mismatch for class %"PRIsVALUE"",
5872 rb_id2str(id));
5873 }
5874 else {
5875 return klass;
5876 }
5877 }
5878 else {
5879 return klass;
5880 }
5881}
5882
5883static VALUE
5884vm_check_if_module(ID id, VALUE mod)
5885{
5886 if (!RB_TYPE_P(mod, T_MODULE)) {
5887 return 0;
5888 }
5889 else {
5890 return mod;
5891 }
5892}
5893
5894static VALUE
5895declare_under(ID id, VALUE cbase, VALUE c)
5896{
5897 rb_set_class_path_string(c, cbase, rb_id2str(id));
5898 rb_const_set(cbase, id, c);
5899 return c;
5900}
5901
5902static VALUE
5903vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5904{
5905 /* new class declaration */
5906 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5907 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5908 rb_define_alloc_func(c, rb_get_alloc_func(c));
5909 rb_class_inherited(s, c);
5910 return c;
5911}
5912
5913static VALUE
5914vm_declare_module(ID id, VALUE cbase)
5915{
5916 /* new module declaration */
5917 return declare_under(id, cbase, rb_module_new());
5918}
5919
5920NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5921static void
5922unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5923{
5924 VALUE name = rb_id2str(id);
5925 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5926 name, type);
5927 VALUE location = rb_const_source_location_at(cbase, id);
5928 if (!NIL_P(location)) {
5929 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5930 " previous definition of %"PRIsVALUE" was here",
5931 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5932 }
5933 rb_raise(rb_eTypeError, "%"PRIsVALUE, message);
5934}
5935
5936static VALUE
5937vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5938{
5939 VALUE klass;
5940
5941 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5942 rb_raise(rb_eTypeError,
5943 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5944 rb_obj_class(super));
5945 }
5946
5947 vm_check_if_namespace(cbase);
5948
5949 /* find klass */
5950 rb_autoload_load(cbase, id);
5951
5952 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5953 if (!vm_check_if_class(id, flags, super, klass))
5954 unmatched_redefinition("class", cbase, id, klass);
5955 return klass;
5956 }
5957 else {
5958 return vm_declare_class(id, flags, cbase, super);
5959 }
5960}
5961
5962static VALUE
5963vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5964{
5965 VALUE mod;
5966
5967 vm_check_if_namespace(cbase);
5968 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5969 if (!vm_check_if_module(id, mod))
5970 unmatched_redefinition("module", cbase, id, mod);
5971 return mod;
5972 }
5973 else {
5974 return vm_declare_module(id, cbase);
5975 }
5976}
5977
5978static VALUE
5979vm_find_or_create_class_by_id(ID id,
5980 rb_num_t flags,
5981 VALUE cbase,
5982 VALUE super)
5983{
5984 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5985
5986 switch (type) {
5987 case VM_DEFINECLASS_TYPE_CLASS:
5988 /* classdef returns class scope value */
5989 return vm_define_class(id, flags, cbase, super);
5990
5991 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5992 /* classdef returns class scope value */
5993 return rb_singleton_class(cbase);
5994
5995 case VM_DEFINECLASS_TYPE_MODULE:
5996 /* classdef returns class scope value */
5997 return vm_define_module(id, flags, cbase);
5998
5999 default:
6000 rb_bug("unknown defineclass type: %d", (int)type);
6001 }
6002}
6003
6004static rb_method_visibility_t
6005vm_scope_visibility_get(const rb_execution_context_t *ec)
6006{
6007 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6008
6009 if (!vm_env_cref_by_cref(cfp->ep)) {
6010 return METHOD_VISI_PUBLIC;
6011 }
6012 else {
6013 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
6014 }
6015}
6016
6017static int
6018vm_scope_module_func_check(const rb_execution_context_t *ec)
6019{
6020 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
6021
6022 if (!vm_env_cref_by_cref(cfp->ep)) {
6023 return FALSE;
6024 }
6025 else {
6026 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
6027 }
6028}
6029
6030static void
6031vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
6032{
6033 VALUE klass;
6034 rb_method_visibility_t visi;
6035 rb_cref_t *cref = vm_ec_cref(ec);
6036
6037 if (is_singleton) {
6038 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
6039 visi = METHOD_VISI_PUBLIC;
6040 }
6041 else {
6042 klass = CREF_CLASS_FOR_DEFINITION(cref);
6043 visi = vm_scope_visibility_get(ec);
6044 }
6045
6046 if (NIL_P(klass)) {
6047 rb_raise(rb_eTypeError, "no class/module to add method");
6048 }
6049
6050 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
6051 // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
6052 if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
6053 RCLASS_SET_MAX_IV_COUNT(klass, rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval));
6054 }
6055
6056 if (!is_singleton && vm_scope_module_func_check(ec)) {
6057 klass = rb_singleton_class(klass);
6058 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
6059 }
6060}
6061
6062static VALUE
6063vm_invokeblock_i(struct rb_execution_context_struct *ec,
6064 struct rb_control_frame_struct *reg_cfp,
6065 struct rb_calling_info *calling)
6066{
6067 const struct rb_callinfo *ci = calling->cd->ci;
6068 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
6069
6070 if (block_handler == VM_BLOCK_HANDLER_NONE) {
6071 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
6072 }
6073 else {
6074 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
6075 }
6076}
6077
6078enum method_explorer_type {
6079 mexp_search_method,
6080 mexp_search_invokeblock,
6081 mexp_search_super,
6082};
6083
6084static inline VALUE
6085vm_sendish(
6086 struct rb_execution_context_struct *ec,
6087 struct rb_control_frame_struct *reg_cfp,
6088 struct rb_call_data *cd,
6089 VALUE block_handler,
6090 enum method_explorer_type method_explorer
6091) {
6092 VALUE val = Qundef;
6093 const struct rb_callinfo *ci = cd->ci;
6094 const struct rb_callcache *cc;
6095 int argc = vm_ci_argc(ci);
6096 VALUE recv = TOPN(argc);
6097 struct rb_calling_info calling = {
6098 .block_handler = block_handler,
6099 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
6100 .recv = recv,
6101 .argc = argc,
6102 .cd = cd,
6103 };
6104
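 /* Three dispatch modes: plain method search (with the inline-cache
  * fastpath), super (resolution starts above the current method's
  * defined class), and block invocation (no method lookup at all;
  * control transfers to the frame's block handler). */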
6105 switch (method_explorer) {
6106 case mexp_search_method:
6107 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
6108 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6109 break;
6110 case mexp_search_super:
6111 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
6112 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
6113 break;
6114 case mexp_search_invokeblock:
6115 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
6116 break;
6117 }
6118 return val;
6119}
6120
6121VALUE
6122rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6123{
6124 stack_check(ec);
6125 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
6126 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6127 VM_EXEC(ec, val);
6128 return val;
6129}
6130
6131VALUE
6132rb_vm_sendforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6133{
6134 stack_check(ec);
6135
6136 struct rb_forwarding_call_data adjusted_cd;
6137 struct rb_callinfo adjusted_ci;
6138
6139 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
6140
6141 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
6142
6143 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6144 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6145 }
6146
6147 VM_EXEC(ec, val);
6148 return val;
6149}
6150
6151VALUE
6152rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6153{
6154 stack_check(ec);
6155 VALUE bh = VM_BLOCK_HANDLER_NONE;
6156 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6157 VM_EXEC(ec, val);
6158 return val;
6159}
6160
6161VALUE
6162rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6163{
6164 stack_check(ec);
6165
6166 VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6167 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6168
6169 VM_EXEC(ec, val);
6170 return val;
6171}
6172
6173VALUE
6174rb_vm_invokesuperforward(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6175{
6176 stack_check(ec);
6177 struct rb_forwarding_call_data adjusted_cd;
6178 struct rb_callinfo adjusted_ci;
6179
6180 VALUE bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6181
6182 VALUE val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6183
6184 if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6185 RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6186 }
6187
6188 VM_EXEC(ec, val);
6189 return val;
6190}
6191
6192VALUE
6193rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6194{
6195 stack_check(ec);
6196 VALUE bh = VM_BLOCK_HANDLER_NONE;
6197 VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6198 VM_EXEC(ec, val);
6199 return val;
6200}
6201
6202/* object.c */
6203VALUE rb_nil_to_s(VALUE);
6204VALUE rb_true_to_s(VALUE);
6205VALUE rb_false_to_s(VALUE);
6206/* numeric.c */
6207VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6208VALUE rb_fix_to_s(VALUE);
6209/* variable.c */
6210VALUE rb_mod_to_s(VALUE);
6212
6213static VALUE
6214vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6215{
6216 int type = TYPE(recv);
6217 if (type == T_STRING) {
6218 return recv;
6219 }
6220
6221 const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
6222
6223 switch (type) {
6224 case T_SYMBOL:
6225 if (check_method_basic_definition(cme)) {
6226 // rb_sym_to_s() allocates a mutable string, but since we are only
6227 // going to use this string for interpolation, it's fine to use the
6228 // frozen string.
6229 return rb_sym2str(recv);
6230 }
6231 break;
6232 case T_MODULE:
6233 case T_CLASS:
6234 if (check_cfunc(cme, rb_mod_to_s)) {
6235 // rb_mod_to_s() allocates a mutable string, but since we are only
6236 // going to use this string for interpolation, it's fine to use the
6237 // frozen string.
6238 VALUE val = rb_mod_name(recv);
6239 if (NIL_P(val)) {
6240 val = rb_mod_to_s(recv);
6241 }
6242 return val;
6243 }
6244 break;
6245 case T_NIL:
6246 if (check_cfunc(cme, rb_nil_to_s)) {
6247 return rb_nil_to_s(recv);
6248 }
6249 break;
6250 case T_TRUE:
6251 if (check_cfunc(cme, rb_true_to_s)) {
6252 return rb_true_to_s(recv);
6253 }
6254 break;
6255 case T_FALSE:
6256 if (check_cfunc(cme, rb_false_to_s)) {
6257 return rb_false_to_s(recv);
6258 }
6259 break;
6260 case T_FIXNUM:
6261 if (check_cfunc(cme, rb_int_to_s)) {
6262 return rb_fix_to_s(recv);
6263 }
6264 break;
6265 }
6266 return Qundef;
6267}
6268
6269// ZJIT implementation is using the C function
6270// and needs to call a non-static function
6271VALUE
6272rb_vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6273{
6274 return vm_objtostring(iseq, recv, cd);
6275}
6276
6277static VALUE
6278vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6279{
6280 if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6281 return ary;
6282 }
6283 else {
6284 return Qundef;
6285 }
6286}
6287
6288static VALUE
6289vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6290{
6291 if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6292 return hash;
6293 }
6294 else {
6295 return Qundef;
6296 }
6297}
6298
6299static VALUE
6300vm_opt_str_freeze(VALUE str, int bop, ID id)
6301{
6302 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6303 return str;
6304 }
6305 else {
6306 return Qundef;
6307 }
6308}
6309
6310/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6311#define id_cmp idCmp
6312
6313static VALUE
6314vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6315{
6316 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6317 return rb_ary_includes(ary, target);
6318 }
6319 else {
6320 VALUE args[1] = {target};
6321
6322 // duparray
6323 RUBY_DTRACE_CREATE_HOOK(ARRAY, RARRAY_LEN(ary));
6324 VALUE dupary = rb_ary_resurrect(ary);
6325
6326 return rb_vm_call_with_refinements(ec, dupary, idIncludeP, 1, args, RB_NO_KEYWORDS);
6327 }
6328}
6329
6330VALUE
6331rb_vm_opt_duparray_include_p(rb_execution_context_t *ec, const VALUE ary, VALUE target)
6332{
6333 return vm_opt_duparray_include_p(ec, ary, target);
6334}
6335
6336static VALUE
6337vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6338{
6339 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6340 if (num == 0) {
6341 return Qnil;
6342 }
6343 else {
6344 VALUE result = *ptr;
6345 rb_snum_t i = num - 1;
6346 while (i-- > 0) {
6347 const VALUE v = *++ptr;
6348 if (OPTIMIZED_CMP(v, result) > 0) {
6349 result = v;
6350 }
6351 }
6352 return result;
6353 }
6354 }
6355 else {
6356 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6357 }
6358}
6359
6360VALUE
6361rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6362{
6363 return vm_opt_newarray_max(ec, num, ptr);
6364}
6365
6366static VALUE
6367vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6368{
6369 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6370 if (num == 0) {
6371 return Qnil;
6372 }
6373 else {
6374 VALUE result = *ptr;
6375 rb_snum_t i = num - 1;
6376 while (i-- > 0) {
6377 const VALUE v = *++ptr;
6378 if (OPTIMIZED_CMP(v, result) < 0) {
6379 result = v;
6380 }
6381 }
6382 return result;
6383 }
6384 }
6385 else {
6386 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6387 }
6388}
6389
6390VALUE
6391rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6392{
6393 return vm_opt_newarray_min(ec, num, ptr);
6394}
6395
6396static VALUE
6397vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6398{
6399 // If Array#hash is _not_ monkeypatched, use the optimized call
6400 if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6401 return rb_ary_hash_values(num, ptr);
6402 }
6403 else {
6404 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6405 }
6406}
6407
6408VALUE
6409rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6410{
6411 return vm_opt_newarray_hash(ec, num, ptr);
6412}
6413
6414VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6415VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6416
6417static VALUE
6418vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6419{
6420 if (BASIC_OP_UNREDEFINED_P(BOP_INCLUDE_P, ARRAY_REDEFINED_OP_FLAG)) {
6421 struct RArray fake_ary = {RBASIC_INIT};
6422 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6423 return rb_ary_includes(ary, target);
6424 }
6425 else {
6426 VALUE args[1] = {target};
6427 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idIncludeP, 1, args, RB_NO_KEYWORDS);
6428 }
6429}
6430
6431VALUE
6432rb_vm_opt_newarray_include_p(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE target)
6433{
6434 return vm_opt_newarray_include_p(ec, num, ptr, target);
6435}
6436
6437static VALUE
6438vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6439{
6440 if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6441 struct RArray fake_ary = {RBASIC_INIT};
6442 VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6443 return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6444 }
6445 else {
6446 // The opt_newarray_send insn drops the keyword args so we need to rebuild them.
6447 // Set up an array with room for the keyword hash.
6448 VALUE args[2];
6449 args[0] = fmt;
6450 int kw_splat = RB_NO_KEYWORDS;
6451 int argc = 1;
6452
6453 if (!UNDEF_P(buffer)) {
6454 args[1] = rb_hash_new_with_size(1);
6455 rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6456 kw_splat = RB_PASS_KEYWORDS;
6457 argc++;
6458 }
6459
6460 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6461 }
6462}
6463
6464VALUE
6465rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6466{
6467 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6468}
6469
6470VALUE
6471rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6472{
6473 return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6474}
6475
6476#undef id_cmp
6477
6478static void
6479vm_track_constant_cache(ID id, void *ic)
6480{
6481 rb_vm_t *vm = GET_VM();
6482 struct rb_id_table *const_cache = vm->constant_cache;
6483 VALUE lookup_result;
6484 set_table *ics;
6485
6486 if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6487 ics = (set_table *)lookup_result;
6488 }
6489 else {
6490 ics = set_init_numtable();
6491 rb_id_table_insert(const_cache, id, (VALUE)ics);
6492 }
6493
6494 /* The call below to set_insert could allocate, which could trigger a GC.
6495 * If it triggers a GC, it may free an iseq that also holds a cache to this
6496 * constant. If that iseq is the last iseq with a cache to this constant, then
6497 * it will free this set_table, which would cause a use-after-free during this
6498 * set_insert.
6499 *
6500 * So to fix this issue, we store the ID that is currently being inserted
6501 * and, in remove_from_constant_cache, we don't free the ST table for ID
6502 * equal to this one.
6503 *
6504 * See [Bug #20921].
6505 */
6506 vm->inserting_constant_cache_id = id;
6507
6508 set_insert(ics, (st_data_t)ic);
6509
6510 vm->inserting_constant_cache_id = (ID)0;
6511}
6512
6513static void
6514vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6515{
6516 RB_VM_LOCKING() {
6517 for (int i = 0; segments[i]; i++) {
6518 ID id = segments[i];
6519 if (id == idNULL) continue;
6520 vm_track_constant_cache(id, ic);
6521 }
6522 }
6523}
6524
6525// For JIT inlining
6526static inline bool
6527vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6528{
6529 if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6530 VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6531
6532 return (ic_cref == NULL || // no need to check CREF
6533 ic_cref == vm_get_cref(reg_ep));
6534 }
6535 return false;
6536}
6537
6538static bool
6539vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6540{
6541 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6542 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6543}
6544
6545// YJIT needs this function to never allocate and never raise
6546bool
6547rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6548{
6549 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6550}
6551
6552static void
6553vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6554{
6555 if (ruby_vm_const_missing_count > 0) {
6556 ruby_vm_const_missing_count = 0;
6557 ic->entry = NULL;
6558 return;
6559 }
6560
6561 struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6562 RB_OBJ_WRITE(ice, &ice->value, val);
6563 ice->ic_cref = vm_get_const_key_cref(reg_ep);
6564
6565 if (rb_ractor_shareable_p(val)) {
6566 RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
6567 ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6568 }
6569 RB_OBJ_WRITE(iseq, &ic->entry, ice);
6570 RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6571 unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6572 rb_yjit_constant_ic_update(iseq, ic, pos);
6573}
6574
6575VALUE
6576rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6577{
6578 VALUE val;
6579 const ID *segments = ic->segments;
6580 struct iseq_inline_constant_cache_entry *ice = ic->entry;
6581
6582 if (ice && vm_ic_hit_p(ice, GET_EP())) {
6583 val = ice->value;
6584
6585 VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6586 }
6587 else {
6588 ruby_vm_constant_cache_misses++;
6589 val = vm_get_ev_const_chain(ec, segments);
6590 vm_ic_track_const_chain(GET_CFP(), ic, segments);
6591 // Undo the PC increment to get the address of this instruction
6592 // INSN_ATTR(width) == 2
6593 vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6594 }
6595 return val;
6596}
6597
6598static VALUE
6599vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6600{
6601 rb_thread_t *th = rb_ec_thread_ptr(ec);
6602 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
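 /* once.running_thread is a tri-state latch: NULL (never run), a live
  * thread pointer (body currently running), or the sentinel
  * RUNNING_THREAD_ONCE_DONE (finished; once.value is valid). */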
6603
6604 again:
6605 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6606 return is->once.value;
6607 }
6608 else if (is->once.running_thread == NULL) {
6609 VALUE val;
6610 is->once.running_thread = th;
6611 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6612 // TODO: confirm that it is shareable
6613
6614 if (RB_FL_ABLE(val)) {
6615 RB_OBJ_SET_SHAREABLE(val);
6616 }
6617
6618 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6619
6620 /* is->once.running_thread is cleared by vm_once_clear() */
6621 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6622 return val;
6623 }
6624 else if (is->once.running_thread == th) {
6625 /* recursive once */
6626 return vm_once_exec((VALUE)iseq);
6627 }
6628 else {
6629 /* waiting for finish */
6630 RUBY_VM_CHECK_INTS(ec);
6631 rb_thread_schedule();
6632 goto again;
6633 }
6634}
6635
6636static OFFSET
6637vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6638{
6639 switch (OBJ_BUILTIN_TYPE(key)) {
6640 case -1:
6641 case T_FLOAT:
6642 case T_SYMBOL:
6643 case T_BIGNUM:
6644 case T_STRING:
6645 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6646 SYMBOL_REDEFINED_OP_FLAG |
6647 INTEGER_REDEFINED_OP_FLAG |
6648 FLOAT_REDEFINED_OP_FLAG |
6649 NIL_REDEFINED_OP_FLAG |
6650 TRUE_REDEFINED_OP_FLAG |
6651 FALSE_REDEFINED_OP_FLAG |
6652 STRING_REDEFINED_OP_FLAG)) {
6653 st_data_t val;
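 /* Integral Float keys (e.g. 1.0) are normalized to Integer so the
  * lookup can hit the CDHASH entry built for an Integer literal:
  * `when 1` matches `case 1.0` under ==, but Float and Integer keys
  * would otherwise land in different hash slots. */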
6654 if (RB_FLOAT_TYPE_P(key)) {
6655 double kval = RFLOAT_VALUE(key);
6656 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6657 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6658 }
6659 }
6660 if (rb_hash_stlike_lookup(hash, key, &val)) {
6661 return FIX2LONG((VALUE)val);
6662 }
6663 else {
6664 return else_offset;
6665 }
6666 }
6667 }
6668 return 0;
6669}
6670
6671NORETURN(static void
6672 vm_stack_consistency_error(const rb_execution_context_t *ec,
6673 const rb_control_frame_t *,
6674 const VALUE *));
6675static void
6676vm_stack_consistency_error(const rb_execution_context_t *ec,
6677 const rb_control_frame_t *cfp,
6678 const VALUE *bp)
6679{
6680 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6681 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6682 static const char stack_consistency_error[] =
6683 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6684#if defined RUBY_DEVEL
6685 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6686 rb_str_cat_cstr(mesg, "\n");
6687 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6688 rb_bug("%s", RSTRING_PTR(mesg));
6689#else
6690 rb_bug(stack_consistency_error, nsp, nbp);
6691#endif
6692}
6693
6694static VALUE
6695vm_opt_plus(VALUE recv, VALUE obj)
6696{
6697 if (FIXNUM_2_P(recv, obj) &&
6698 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6699 return rb_fix_plus_fix(recv, obj);
6700 }
6701 else if (FLONUM_2_P(recv, obj) &&
6702 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6703 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6704 }
6705 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6706 return Qundef;
6707 }
6708 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6709 RBASIC_CLASS(obj) == rb_cFloat &&
6710 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6711 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6712 }
6713 else if (RBASIC_CLASS(recv) == rb_cString &&
6714 RBASIC_CLASS(obj) == rb_cString &&
6715 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6716 return rb_str_opt_plus(recv, obj);
6717 }
6718 else if (RBASIC_CLASS(recv) == rb_cArray &&
6719 RBASIC_CLASS(obj) == rb_cArray &&
6720 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6721 return rb_ary_plus(recv, obj);
6722 }
6723 else {
6724 return Qundef;
6725 }
6726}
6727
6728static VALUE
6729vm_opt_minus(VALUE recv, VALUE obj)
6730{
6731 if (FIXNUM_2_P(recv, obj) &&
6732 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6733 return rb_fix_minus_fix(recv, obj);
6734 }
6735 else if (FLONUM_2_P(recv, obj) &&
6736 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6737 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6738 }
6739 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6740 return Qundef;
6741 }
6742 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6743 RBASIC_CLASS(obj) == rb_cFloat &&
6744 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6745 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6746 }
6747 else {
6748 return Qundef;
6749 }
6750}
6751
6752static VALUE
6753vm_opt_mult(VALUE recv, VALUE obj)
6754{
6755 if (FIXNUM_2_P(recv, obj) &&
6756 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6757 return rb_fix_mul_fix(recv, obj);
6758 }
6759 else if (FLONUM_2_P(recv, obj) &&
6760 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6761 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6762 }
6763 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6764 return Qundef;
6765 }
6766 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6767 RBASIC_CLASS(obj) == rb_cFloat &&
6768 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6769 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6770 }
6771 else {
6772 return Qundef;
6773 }
6774}
6775
6776static VALUE
6777vm_opt_div(VALUE recv, VALUE obj)
6778{
6779 if (FIXNUM_2_P(recv, obj) &&
6780 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6781 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6782 }
6783 else if (FLONUM_2_P(recv, obj) &&
6784 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6785 return rb_flo_div_flo(recv, obj);
6786 }
6787 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6788 return Qundef;
6789 }
6790 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6791 RBASIC_CLASS(obj) == rb_cFloat &&
6792 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6793 return rb_flo_div_flo(recv, obj);
6794 }
6795 else {
6796 return Qundef;
6797 }
6798}
6799
6800static VALUE
6801vm_opt_mod(VALUE recv, VALUE obj)
6802{
6803 if (FIXNUM_2_P(recv, obj) &&
6804 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6805 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6806 }
6807 else if (FLONUM_2_P(recv, obj) &&
6808 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6809 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6810 }
6811 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6812 return Qundef;
6813 }
6814 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6815 RBASIC_CLASS(obj) == rb_cFloat &&
6816 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6817 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6818 }
6819 else {
6820 return Qundef;
6821 }
6822}
6823
6824static VALUE
6825vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6826{
6827 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6828 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6829
6830 if (!UNDEF_P(val)) {
6831 return RBOOL(!RTEST(val));
6832 }
6833 }
6834
6835 return Qundef;
6836}
6837
6838static VALUE
6839vm_opt_lt(VALUE recv, VALUE obj)
6840{
6841 if (FIXNUM_2_P(recv, obj) &&
6842 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6843 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6844 }
6845 else if (FLONUM_2_P(recv, obj) &&
6846 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6847 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6848 }
6849 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6850 return Qundef;
6851 }
6852 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6853 RBASIC_CLASS(obj) == rb_cFloat &&
6854 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6855 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6856 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6857 }
6858 else {
6859 return Qundef;
6860 }
6861}
6862
6863static VALUE
6864vm_opt_le(VALUE recv, VALUE obj)
6865{
6866 if (FIXNUM_2_P(recv, obj) &&
6867 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6868 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6869 }
6870 else if (FLONUM_2_P(recv, obj) &&
6871 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6872 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6873 }
6874 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6875 return Qundef;
6876 }
6877 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6878 RBASIC_CLASS(obj) == rb_cFloat &&
6879 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6880 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6881 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6882 }
6883 else {
6884 return Qundef;
6885 }
6886}
6887
6888static VALUE
6889vm_opt_gt(VALUE recv, VALUE obj)
6890{
6891 if (FIXNUM_2_P(recv, obj) &&
6892 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6893 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6894 }
6895 else if (FLONUM_2_P(recv, obj) &&
6896 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6897 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6898 }
6899 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6900 return Qundef;
6901 }
6902 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6903 RBASIC_CLASS(obj) == rb_cFloat &&
6904 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6905 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6906 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6907 }
6908 else {
6909 return Qundef;
6910 }
6911}
6912
6913static VALUE
6914vm_opt_ge(VALUE recv, VALUE obj)
6915{
6916 if (FIXNUM_2_P(recv, obj) &&
6917 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6918 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6919 }
6920 else if (FLONUM_2_P(recv, obj) &&
6921 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6922 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6923 }
6924 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6925 return Qundef;
6926 }
6927 else if (RBASIC_CLASS(recv) == rb_cFloat &&
6928 RBASIC_CLASS(obj) == rb_cFloat &&
6929 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6930 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6931 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6932 }
6933 else {
6934 return Qundef;
6935 }
6936}
6937
6938
6939static VALUE
6940vm_opt_ltlt(VALUE recv, VALUE obj)
6941{
6942 if (SPECIAL_CONST_P(recv)) {
6943 return Qundef;
6944 }
6945 else if (RBASIC_CLASS(recv) == rb_cString &&
6946 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6947 if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6948 return rb_str_buf_append(recv, obj);
6949 }
6950 else {
6951 return rb_str_concat(recv, obj);
6952 }
6953 }
6954 else if (RBASIC_CLASS(recv) == rb_cArray &&
6955 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6956 return rb_ary_push(recv, obj);
6957 }
6958 else {
6959 return Qundef;
6960 }
6961}
6962
6963static VALUE
6964vm_opt_and(VALUE recv, VALUE obj)
6965{
6966 // If recv and obj are both fixnums, then the bottom tag bit
6967 // will be 1 on both. 1 & 1 == 1, so the result value will also
6968 // be a fixnum. If either side is *not* a fixnum, then the tag bit
6969 // will be 0, and we return Qundef.
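 // Worked example, assuming the usual (n << 1) | 1 Fixnum tagging:
 // 5 -> 0b1011 and 3 -> 0b0111, so the raw AND gives 0b0011, which is
 // exactly the tagged form of 1 == (5 & 3); no untag/retag is needed.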
6970 VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6971
6972 if (FIXNUM_P(ret) &&
6973 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6974 return ret;
6975 }
6976 else {
6977 return Qundef;
6978 }
6979}
6980
6981static VALUE
6982vm_opt_or(VALUE recv, VALUE obj)
6983{
6984 if (FIXNUM_2_P(recv, obj) &&
6985 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6986 return recv | obj;
6987 }
6988 else {
6989 return Qundef;
6990 }
6991}
6992
6993static VALUE
6994vm_opt_aref(VALUE recv, VALUE obj)
6995{
6996 if (SPECIAL_CONST_P(recv)) {
6997 if (FIXNUM_2_P(recv, obj) &&
6998 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6999 return rb_fix_aref(recv, obj);
7000 }
7001 return Qundef;
7002 }
7003 else if (RBASIC_CLASS(recv) == rb_cArray &&
7004 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
7005 if (FIXNUM_P(obj)) {
7006 return rb_ary_entry_internal(recv, FIX2LONG(obj));
7007 }
7008 else {
7009 return rb_ary_aref1(recv, obj);
7010 }
7011 }
7012 else if (RBASIC_CLASS(recv) == rb_cHash &&
7013 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
7014 return rb_hash_aref(recv, obj);
7015 }
7016 else {
7017 return Qundef;
7018 }
7019}
7020
7021static VALUE
7022vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
7023{
7024 if (SPECIAL_CONST_P(recv)) {
7025 return Qundef;
7026 }
7027 else if (RBASIC_CLASS(recv) == rb_cArray &&
7028 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
7029 FIXNUM_P(obj)) {
7030 rb_ary_store(recv, FIX2LONG(obj), set);
7031 return set;
7032 }
7033 else if (RBASIC_CLASS(recv) == rb_cHash &&
7034 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
7035 rb_hash_aset(recv, obj, set);
7036 return set;
7037 }
7038 else {
7039 return Qundef;
7040 }
7041}
7042
7043static VALUE
7044vm_opt_length(VALUE recv, int bop)
7045{
7046 if (SPECIAL_CONST_P(recv)) {
7047 return Qundef;
7048 }
7049 else if (RBASIC_CLASS(recv) == rb_cString &&
7050 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
7051 if (bop == BOP_EMPTY_P) {
7052 return LONG2NUM(RSTRING_LEN(recv));
7053 }
7054 else {
7055 return rb_str_length(recv);
7056 }
7057 }
7058 else if (RBASIC_CLASS(recv) == rb_cArray &&
7059 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
7060 return LONG2NUM(RARRAY_LEN(recv));
7061 }
7062 else if (RBASIC_CLASS(recv) == rb_cHash &&
7063 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
7064 return INT2FIX(RHASH_SIZE(recv));
7065 }
7066 else {
7067 return Qundef;
7068 }
7069}
7070
7071static VALUE
7072vm_opt_empty_p(VALUE recv)
7073{
7074 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
7075 case Qundef: return Qundef;
7076 case INT2FIX(0): return Qtrue;
7077 default: return Qfalse;
7078 }
7079}
7080
7081VALUE rb_false(VALUE obj);
7082
7083static VALUE
7084vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7085{
7086 if (NIL_P(recv) &&
7087 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
7088 return Qtrue;
7089 }
7090 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
7091 return Qfalse;
7092 }
7093 else {
7094 return Qundef;
7095 }
7096}
7097
7098static VALUE
7099fix_succ(VALUE x)
7100{
7101 switch (x) {
7102 case ~0UL:
7103 /* 0xFFFF_FFFF == INT2FIX(-1)
7104 * `-1.succ` is of course 0. */
7105 return INT2FIX(0);
7106 case RSHIFT(~0UL, 1):
7107 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
7108 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
7109 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
7110 default:
7111 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
7112 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
7113 * == lx*2 + ly*2 + 1
7114 * == (lx*2+1) + (ly*2+1) - 1
7115 * == x + y - 1
7116 *
7117 * Here, if we put y := INT2FIX(1):
7118 *
7119 * == x + INT2FIX(1) - 1
7120 * == x + 2 .
7121 */
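 /* Concrete check: 5 is tagged as 11 (0b1011); 11 + 2 == 13
  * (0b1101), the tagged form of 6 == 5.succ. */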
7122 return x + 2;
7123 }
7124}
7125
7126static VALUE
7127vm_opt_succ(VALUE recv)
7128{
7129 if (FIXNUM_P(recv) &&
7130 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
7131 return fix_succ(recv);
7132 }
7133 else if (SPECIAL_CONST_P(recv)) {
7134 return Qundef;
7135 }
7136 else if (RBASIC_CLASS(recv) == rb_cString &&
7137 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
7138 return rb_str_succ(recv);
7139 }
7140 else {
7141 return Qundef;
7142 }
7143}
7144
7145static VALUE
7146vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
7147{
7148 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
7149 return RBOOL(!RTEST(recv));
7150 }
7151 else {
7152 return Qundef;
7153 }
7154}
7155
7156static VALUE
7157vm_opt_regexpmatch2(VALUE recv, VALUE obj)
7158{
7159 if (SPECIAL_CONST_P(recv)) {
7160 return Qundef;
7161 }
7162 else if (RBASIC_CLASS(recv) == rb_cString &&
7163 CLASS_OF(obj) == rb_cRegexp &&
7164 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
7165 return rb_reg_match(obj, recv);
7166 }
7167 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
7168 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
7169 return rb_reg_match(recv, obj);
7170 }
7171 else {
7172 return Qundef;
7173 }
7174}
7175
7176rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
7177
7178NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
7179
7180static inline void
7181vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
7182 rb_event_flag_t pc_events, rb_event_flag_t target_event,
7183 rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
7184{
7185 rb_event_flag_t event = pc_events & target_event;
7186 VALUE self = GET_SELF();
7187
7188 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
7189
7190 if (event & global_hooks->events) {
7191 /* increment PC because source line is calculated with PC-1 */
7192 reg_cfp->pc++;
7193 vm_dtrace(event, ec);
7194 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
7195 reg_cfp->pc--;
7196 }
7197
7198 // Load here since global hook above can add and free local hooks
7199 rb_hook_list_t *local_hooks = *local_hooks_ptr;
7200 if (local_hooks != NULL) {
7201 if (event & local_hooks->events) {
7202 /* increment PC because source line is calculated with PC-1 */
7203 reg_cfp->pc++;
7204 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
7205 reg_cfp->pc--;
7206 }
7207 }
7208}
7209
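/* Fire vm_trace_hook only when the current instruction carries the
 * event (pc_events) and someone is listening for it (enabled_flags). */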
7210#define VM_TRACE_HOOK(target_event, val) do { \
7211 if ((pc_events & (target_event)) & enabled_flags) { \
7212 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7213 } \
7214} while (0)
7215
7216static VALUE
7217rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7218{
7219 VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7220 VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7221 return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7222}
7223
7224static void
7225vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7226{
7227 const VALUE *pc = reg_cfp->pc;
7228 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7229 rb_event_flag_t global_events = enabled_flags;
7230
7231 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7232 return;
7233 }
7234 else {
7235 const rb_iseq_t *iseq = reg_cfp->iseq;
7236 VALUE iseq_val = (VALUE)iseq;
7237 size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7238 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7239 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7240 rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7241 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7242 rb_hook_list_t *bmethod_local_hooks = NULL;
7243 rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7244 rb_event_flag_t bmethod_local_events = 0;
7245 const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7246 enabled_flags |= iseq_local_events;
7247
7248 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7249
7250 if (bmethod_frame) {
7251 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7252 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7253 bmethod_local_hooks = me->def->body.bmethod.hooks;
7254 bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7255 if (bmethod_local_hooks) {
7256 bmethod_local_events = bmethod_local_hooks->events;
7257 }
7258 }
7259
7260
7261 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7262#if 0
7263 /* disable trace */
7264 /* TODO: incomplete */
7265 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7266#else
7267 /* do not disable tracing here: re-enabling it later
7268 * costs more than leaving it on
7269 */
7270#endif
7271 return;
7272 }
7273 else if (ec->trace_arg != NULL) {
7274 /* already tracing */
7275 return;
7276 }
7277 else {
7278 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7279 /* Note, not considering iseq local events here since the same
7280 * iseq could be used in multiple bmethods. */
7281 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7282
7283 if (0) {
7284 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7285 (int)pos,
7286 (int)pc_events,
7287 RSTRING_PTR(rb_iseq_path(iseq)),
7288 (int)rb_iseq_line_no(iseq, pos),
7289 RSTRING_PTR(rb_iseq_label(iseq)));
7290 }
7291 VM_ASSERT(reg_cfp->pc == pc);
7292 VM_ASSERT(pc_events != 0);
7293
7294 /* check traces */
7295 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7296 /* b_call instruction running as a method. Fire call event. */
7297 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7298 }
7299 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7300 VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7301 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7302 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7303 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7304 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7305 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7306 /* b_return instruction running as a method. Fire return event. */
7307 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7308 }
7309
7310 // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7311 // We need the pointer to stay valid in case compaction happens in a trace hook.
7312 //
7313 // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7314 // storage for `rb_method_definition_t` is not on the GC heap.
7315 RB_GC_GUARD(iseq_val);
7316 }
7317 }
7318}
7319#undef VM_TRACE_HOOK
7320
7321#if VM_CHECK_MODE > 0
7322NORETURN( NOINLINE( COLDFUNC
7323void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7324
7325void
7326Init_vm_stack_canary(void)
7327{
7328 /* This has to be called _after_ our PRNG is properly set up. */
7329 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7330 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7331
7332 vm_stack_canary_was_born = true;
7333 VM_ASSERT(n == 0);
7334}
7335
7336void
7337rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7338{
7339 /* Because a method has already been called, why not call
7340 * another one. */
7341 const char *insn = rb_insns_name(i);
7342 VALUE inspection = rb_inspect(c);
7343 const char *str = StringValueCStr(inspection);
7344
7345 rb_bug("dead canary found at %s: %s", insn, str);
7346}
7347
7348#else
7349void Init_vm_stack_canary(void) { /* nothing to do */ }
7350#endif
7351
7352
7353/* a part of the following code is generated by this ruby script:
7354
735516.times{|i|
7356 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7357 typedef_args.prepend(", ") if i != 0
7358 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7359 call_args.prepend(", ") if i != 0
7360 puts %Q{
7361static VALUE
7362builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7363{
7364 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7365 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7366}}
7367}
7368
7369puts
7370puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
737116.times{|i|
7372 puts " builtin_invoker#{i},"
7373}
7374puts "};"
7375*/
7376
7377static VALUE
7378builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7379{
7380 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7381 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7382}
7383
7384static VALUE
7385builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7386{
7387 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7388 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7389}
7390
7391static VALUE
7392builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7393{
7394 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7395 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7396}
7397
7398static VALUE
7399builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7400{
7401 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7402 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7403}
7404
7405static VALUE
7406builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7407{
7408 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7409 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7410}
7411
7412static VALUE
7413builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7414{
7415 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7416 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7417}
7418
7419static VALUE
7420builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7421{
7422 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7423 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7424}
7425
7426static VALUE
7427builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7428{
7429 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7430 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7431}
7432
7433static VALUE
7434builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7435{
7436 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7437 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7438}
7439
7440static VALUE
7441builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7442{
7443 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7444 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7445}
7446
7447static VALUE
7448builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7449{
7450 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7451 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7452}
7453
7454static VALUE
7455builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7456{
7457 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7458 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7459}
7460
7461static VALUE
7462builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7463{
7464 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7465 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7466}
7467
7468static VALUE
7469builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7470{
7471 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7472 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7473}
7474
7475static VALUE
7476builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7477{
7478 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7479 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7480}
7481
7482static VALUE
7483builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7484{
7485 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7486 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7487}
7488
7489typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7490
7491static builtin_invoker
7492lookup_builtin_invoker(int argc)
7493{
7494 static const builtin_invoker invokers[] = {
7495 builtin_invoker0,
7496 builtin_invoker1,
7497 builtin_invoker2,
7498 builtin_invoker3,
7499 builtin_invoker4,
7500 builtin_invoker5,
7501 builtin_invoker6,
7502 builtin_invoker7,
7503 builtin_invoker8,
7504 builtin_invoker9,
7505 builtin_invoker10,
7506 builtin_invoker11,
7507 builtin_invoker12,
7508 builtin_invoker13,
7509 builtin_invoker14,
7510 builtin_invoker15,
7511 };
7512
7513 return invokers[argc];
7514}
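
/* Editorial note: invokers[argc] is deliberately unchecked here; callers
 * pass bf->argc, and a builtin's arity is fixed when it is registered
 * (assumption), so argc is trusted to be in 0..15. */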
7515
7516static inline VALUE
7517invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7518{
7519 const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7520 SETUP_CANARY(canary_p);
7521 rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7522 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7523 CHECK_CANARY(canary_p, BIN(invokebuiltin));
7524 return ret;
7525}
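
/* Background sketch (editorial, assuming the usual builtin syntax in
 * Ruby's core *.rb sources): a builtin opts into the leaf assumption
 * checked above roughly like
 *
 *     def bit_length
 *       Primitive.attr! :leaf
 *       Primitive.cexpr! 'rb_int_bit_length(self)'
 *     end
 *
 * and the canary catches leaf-declared builtins that nevertheless write
 * to the VM stack. */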
7526
7527static VALUE
7528vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7529{
7530 return invoke_bf(ec, cfp, bf, argv);
7531}
7532
7533static VALUE
7534vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7535{
7536 if (0) { // debug print
7537 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7538 for (int i=0; i<bf->argc; i++) {
7539 ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7540 }
7541 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7542 (void *)(uintptr_t)bf->func_ptr);
7543 }
7544
7545 if (bf->argc == 0) {
7546 return invoke_bf(ec, cfp, bf, NULL);
7547 }
7548 else {
7549 const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7550 return invoke_bf(ec, cfp, bf, argv);
7551 }
7552}
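
/* Frame-layout sketch (editorial) for the argv computation above: an
 * iseq frame stores its locals in the slots just below VM_ENV_DATA_SIZE
 * slots of environment data, with ep pointing at the last env slot:
 *
 *     ep - local_table_size - VM_ENV_DATA_SIZE + 1   -> local 0
 *     ...                                            -> local n-1
 *     ep - VM_ENV_DATA_SIZE + 1 .. ep                -> env data
 *
 * so adding start_index makes argv point at the first delegated
 * parameter. */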
7553
7554// for __builtin_inline!()
7555
7556VALUE
7557rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7558{
7559 const rb_control_frame_t *cfp = ec->cfp;
7560 return cfp->ep[index];
7561}
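
/* Editorial note (assumption about the generated glue, which is not
 * shown in this file): C fragments written with __builtin_inline!("...")
 * access the surrounding Ruby locals through this helper, which simply
 * reads the slot at the given offset from the frame's ep. */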
7562