Ruby  3.4.0dev (2024-11-22 revision 37a72b0150ec36b4ea27175039afc28c62207b0c)
vm_insnhelper.c (37a72b0150ec36b4ea27175039afc28c62207b0c)
1 /**********************************************************************
2 
3  vm_insnhelper.c - instruction helper functions.
4 
5  $Author$
6 
7  Copyright (C) 2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 #include "ruby/internal/config.h"
12 
13 #include <math.h>
14 
15 #ifdef HAVE_STDATOMIC_H
16  #include <stdatomic.h>
17 #endif
18 
19 #include "constant.h"
20 #include "debug_counter.h"
21 #include "internal.h"
22 #include "internal/class.h"
23 #include "internal/compar.h"
24 #include "internal/hash.h"
25 #include "internal/numeric.h"
26 #include "internal/proc.h"
27 #include "internal/random.h"
28 #include "internal/variable.h"
29 #include "internal/struct.h"
30 #include "variable.h"
31 
32 /* finish iseq array */
33 #include "insns.inc"
34 #include "insns_info.inc"
35 
36 extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
37 extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
38 extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
39 extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
40  int argc, const VALUE *argv, int priv);
41 
42 static const struct rb_callcache vm_empty_cc;
43 static const struct rb_callcache vm_empty_cc_for_super;
44 
45 /* control stack frame */
46 
47 static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
48 
49 VALUE
50 ruby_vm_special_exception_copy(VALUE exc)
51 {
52 VALUE e = rb_obj_alloc(rb_class_real(rb_obj_class(exc)));
53 rb_obj_copy_ivar(e, exc);
54  return e;
55 }
56 
57 NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
58 static void
59 ec_stack_overflow(rb_execution_context_t *ec, int setup)
60 {
61  VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
62  ec->raised_flag = RAISED_STACKOVERFLOW;
63  if (setup) {
64  VALUE at = rb_ec_backtrace_object(ec);
65  mesg = ruby_vm_special_exception_copy(mesg);
66  rb_ivar_set(mesg, idBt, at);
67  rb_ivar_set(mesg, idBt_locations, at);
68  }
69  ec->errinfo = mesg;
70  EC_JUMP_TAG(ec, TAG_RAISE);
71 }
72 
73 NORETURN(static void vm_stackoverflow(void));
74 
75 static void
76 vm_stackoverflow(void)
77 {
78  ec_stack_overflow(GET_EC(), TRUE);
79 }
80 
81 NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
82 void
83 rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
84 {
85  if (rb_during_gc()) {
86  rb_bug("system stack overflow during GC. Faulty native extension?");
87  }
88  if (crit) {
89  ec->raised_flag = RAISED_STACKOVERFLOW;
90  ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91  EC_JUMP_TAG(ec, TAG_RAISE);
92  }
93 #ifdef USE_SIGALTSTACK
94  ec_stack_overflow(ec, TRUE);
95 #else
96  ec_stack_overflow(ec, FALSE);
97 #endif
98 }
99 
100 static inline void stack_check(rb_execution_context_t *ec);
101 
102 #if VM_CHECK_MODE > 0
103 static int
104 callable_class_p(VALUE klass)
105 {
106 #if VM_CHECK_MODE >= 2
107  if (!klass) return FALSE;
108  switch (RB_BUILTIN_TYPE(klass)) {
109  default:
110  break;
111  case T_ICLASS:
112  if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
113  case T_MODULE:
114  return TRUE;
115  }
116  while (klass) {
117  if (klass == rb_cBasicObject) {
118  return TRUE;
119  }
120  klass = RCLASS_SUPER(klass);
121  }
122  return FALSE;
123 #else
124  return klass != 0;
125 #endif
126 }
127 
128 static int
129 callable_method_entry_p(const rb_callable_method_entry_t *cme)
130 {
131  if (cme == NULL) {
132  return TRUE;
133  }
134  else {
135  VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
136 
137  if (callable_class_p(cme->defined_class)) {
138  return TRUE;
139  }
140  else {
141  return FALSE;
142  }
143  }
144 }
145 
146 static void
147 vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
148 {
149  unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
150  enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
151 
152  if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
153  cref_or_me_type = imemo_type(cref_or_me);
154  }
155  if (type & VM_FRAME_FLAG_BMETHOD) {
156  req_me = TRUE;
157  }
158 
159  if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
160  rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
161  }
162  if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
163  rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
164  }
165 
166  if (req_me) {
167  if (cref_or_me_type != imemo_ment) {
168  rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
169  }
170  }
171  else {
172  if (req_cref && cref_or_me_type != imemo_cref) {
173  rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
174  }
175  else { /* cref or Qfalse */
176  if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
177  if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
178  /* ignore */
179  }
180  else {
181  rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
182  }
183  }
184  }
185  }
186 
187  if (cref_or_me_type == imemo_ment) {
188  const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
189 
190  if (!callable_method_entry_p(me)) {
191  rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
192  }
193  }
194 
195  if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
196  VM_ASSERT(iseq == NULL ||
197  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
198 RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
199  );
200  }
201  else {
202  VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
203  }
204 }
205 
206 static void
207 vm_check_frame(VALUE type,
208  VALUE specval,
209  VALUE cref_or_me,
210  const rb_iseq_t *iseq)
211 {
212  VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
213  VM_ASSERT(FIXNUM_P(type));
214 
215 #define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
216  case magic: \
217  vm_check_frame_detail(type, req_block, req_me, req_cref, \
218  specval, cref_or_me, is_cframe, iseq); \
219  break
220  switch (given_magic) {
221  /* BLK ME CREF CFRAME */
222  CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
223  CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
224  CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
225  CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
226  CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
227  CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
228  CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
229  CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
230  CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
231  default:
232  rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
233  }
234 #undef CHECK
235 }
236 
237 static VALUE vm_stack_canary; /* Initialized later */
238 static bool vm_stack_canary_was_born = false;
239 
240 // Return the index of the instruction right before the given PC.
241 // This is needed because insn_entry advances PC before the insn body.
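// Illustrative sketch (added comment, not in the original source): the
// encoded stream is laid out as [opcode][operands...][opcode]..., and
// insn_len() gives each instruction's total width in VALUEs, so scanning
// from position 0 and stopping when iseq_encoded + next_pos == pc recovers
// the instruction whose body is currently executing.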
242 static unsigned int
243 previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
244 {
245  unsigned int pos = 0;
246  while (pos < ISEQ_BODY(iseq)->iseq_size) {
247  int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
248  unsigned int next_pos = pos + insn_len(opcode);
249  if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
250  return pos;
251  }
252  pos = next_pos;
253  }
254  rb_bug("failed to find the previous insn");
255 }
256 
257 void
258 rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
259 {
260  const struct rb_control_frame_struct *reg_cfp = ec->cfp;
261  const struct rb_iseq_struct *iseq;
262 
263  if (! LIKELY(vm_stack_canary_was_born)) {
264  return; /* :FIXME: isn't it rather fatal to enter this branch? */
265  }
266  else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
267  /* This is at the very beginning of a thread. cfp does not exist. */
268  return;
269  }
270  else if (! (iseq = GET_ISEQ())) {
271  return;
272  }
273  else if (LIKELY(sp[0] != vm_stack_canary)) {
274  return;
275  }
276  else {
277  /* we are going to call methods below; squash the canary to
278  * prevent infinite loop. */
279  sp[0] = Qundef;
280  }
281 
282  const VALUE *orig = rb_iseq_original_iseq(iseq);
283  const VALUE iseqw = rb_iseqw_new(iseq);
284  const VALUE inspection = rb_inspect(iseqw);
285  const char *stri = rb_str_to_cstr(inspection);
286  const VALUE disasm = rb_iseq_disasm(iseq);
287  const char *strd = rb_str_to_cstr(disasm);
288  const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
289  const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
290  const char *name = insn_name(insn);
291 
292 /* rb_bug() is not capable of outputting contents this large. It
293 is designed to run from a SIGSEGV handler, which tends to be
294 very restricted. */
295  ruby_debug_printf(
296  "We are killing the stack canary set by %s, "
297  "at %s@pc=%"PRIdPTR"\n"
298 "watch out for the C stack trace.\n"
299  "%s",
300  name, stri, pos, strd);
301  rb_bug("see above.");
302 }
303 #define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
304 
305 #else
306 #define vm_check_canary(ec, sp)
307 #define vm_check_frame(a, b, c, d)
308 #endif /* VM_CHECK_MODE > 0 */
309 
310 #if USE_DEBUG_COUNTER
311 static void
312 vm_push_frame_debug_counter_inc(
313  const struct rb_execution_context_struct *ec,
314  const struct rb_control_frame_struct *reg_cfp,
315  VALUE type)
316 {
317  const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
318 
319  RB_DEBUG_COUNTER_INC(frame_push);
320 
321  if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
322  const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
323  const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
324  if (prev) {
325  if (curr) {
326  RB_DEBUG_COUNTER_INC(frame_R2R);
327  }
328  else {
329  RB_DEBUG_COUNTER_INC(frame_R2C);
330  }
331  }
332  else {
333  if (curr) {
334  RB_DEBUG_COUNTER_INC(frame_C2R);
335  }
336  else {
337  RB_DEBUG_COUNTER_INC(frame_C2C);
338  }
339  }
340  }
341 
342  switch (type & VM_FRAME_MAGIC_MASK) {
343  case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
344  case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
345  case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
346  case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
347  case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
348  case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
349  case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
350  case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
351  case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
352  }
353 
354  rb_bug("unreachable");
355 }
356 #else
357 #define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
358 #endif
359 
360 // Return a poison value to be set above the stack top to verify leafness.
361 VALUE
362 rb_vm_stack_canary(void)
363 {
364 #if VM_CHECK_MODE > 0
365  return vm_stack_canary;
366 #else
367  return 0;
368 #endif
369 }
370 
371 STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
372 STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
373 STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
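/* Frame layout sketch (derived from the assertions above and the stores in
 * vm_push_frame() below; comment added for exposition):
 *
 *   locals[0..local_size-1]  -- Qnil-initialized local variables
 *   ep[-2] (ME_CREF)         -- cref_or_me
 *   ep[-1] (SPECVAL)         -- block handler or previous env pointer
 *   ep[ 0] (FLAGS)           -- frame type and env flags
 */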
374 
375 static void
376 vm_push_frame(rb_execution_context_t *ec,
377  const rb_iseq_t *iseq,
378  VALUE type,
379  VALUE self,
380  VALUE specval,
381  VALUE cref_or_me,
382  const VALUE *pc,
383  VALUE *sp,
384  int local_size,
385  int stack_max)
386 {
387  rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
388 
389  vm_check_frame(type, specval, cref_or_me, iseq);
390  VM_ASSERT(local_size >= 0);
391 
392  /* check stack overflow */
393  CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
394  vm_check_canary(ec, sp);
395 
396  /* setup vm value stack */
397 
398  /* initialize local variables */
399  for (int i=0; i < local_size; i++) {
400  *sp++ = Qnil;
401  }
402 
403  /* setup ep with managing data */
404  *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
405  *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
406  *sp++ = type; /* ep[-0] / ENV_FLAGS */
407 
408  /* setup new frame */
409  *cfp = (const struct rb_control_frame_struct) {
410  .pc = pc,
411  .sp = sp,
412  .iseq = iseq,
413  .self = self,
414  .ep = sp - 1,
415  .block_code = NULL,
416 #if VM_DEBUG_BP_CHECK
417  .bp_check = sp,
418 #endif
419  .jit_return = NULL
420  };
421 
422  /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
423  This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
424  future/untested compilers/platforms. */
425 
426  #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
427  atomic_signal_fence(memory_order_seq_cst);
428  #endif
429 
430  ec->cfp = cfp;
431 
432  if (VMDEBUG == 2) {
433  SDR();
434  }
435  vm_push_frame_debug_counter_inc(ec, cfp, type);
436 }
437 
438 void
439 rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
440 {
441  rb_control_frame_t *cfp = ec->cfp;
442 
443  if (VMDEBUG == 2) SDR();
444 
445  ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
446 }
447 
448 /* return TRUE if the frame is finished */
449 static inline int
450 vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
451 {
452  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
453 
454  if (VMDEBUG == 2) SDR();
455 
456  RUBY_VM_CHECK_INTS(ec);
457  ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
458 
459  return flags & VM_FRAME_FLAG_FINISH;
460 }
461 
462 void
463 rb_vm_pop_frame(rb_execution_context_t *ec)
464 {
465  vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
466 }
467 
468 // Pushes a pseudo frame with the given fname as its file name.
469 VALUE
470 rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
471 {
472  VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
473  void *ptr = ruby_xcalloc(sizeof(struct rb_iseq_constant_body) + sizeof(struct rb_iseq_struct), 1);
474  rb_imemo_tmpbuf_set_ptr(tmpbuf, ptr);
475 
476  struct rb_iseq_struct *dmy_iseq = (struct rb_iseq_struct *)ptr;
477  struct rb_iseq_constant_body *dmy_body = (struct rb_iseq_constant_body *)&dmy_iseq[1];
478  dmy_iseq->body = dmy_body;
479  dmy_body->type = ISEQ_TYPE_TOP;
480  dmy_body->location.pathobj = fname;
481 
482  vm_push_frame(ec,
483  dmy_iseq, //const rb_iseq_t *iseq,
484  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
485  ec->cfp->self, // VALUE self,
486  VM_BLOCK_HANDLER_NONE, // VALUE specval,
487  Qfalse, // VALUE cref_or_me,
488  NULL, // const VALUE *pc,
489  ec->cfp->sp, // VALUE *sp,
490  0, // int local_size,
491  0); // int stack_max
492 
493  return tmpbuf;
494 }
495 
496 /* method dispatch */
497 static inline VALUE
498 rb_arity_error_new(int argc, int min, int max)
499 {
500  VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
501  if (min == max) {
502  /* max is not needed */
503  }
504  else if (max == UNLIMITED_ARGUMENTS) {
505  rb_str_cat_cstr(err_mess, "+");
506  }
507  else {
508  rb_str_catf(err_mess, "..%d", max);
509  }
510  rb_str_cat_cstr(err_mess, ")");
511  return rb_exc_new3(rb_eArgError, err_mess);
512 }
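/* Resulting messages, for illustration (one per branch above):
 *   min == max:                 "wrong number of arguments (given 1, expected 2)"
 *   max == UNLIMITED_ARGUMENTS: "wrong number of arguments (given 1, expected 2+)"
 *   otherwise:                  "wrong number of arguments (given 1, expected 2..3)"
 */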
513 
514 void
515 rb_error_arity(int argc, int min, int max)
516 {
517  rb_exc_raise(rb_arity_error_new(argc, min, max));
518 }
519 
520 /* lvar */
521 
522 NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
523 
524 static void
525 vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
526 {
527 /* forcibly remember the env value */
528  rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
529  VM_FORCE_WRITE(&ep[index], v);
530  VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
531  RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
532 }
533 
534 // YJIT assumes this function never runs GC
535 static inline void
536 vm_env_write(const VALUE *ep, int index, VALUE v)
537 {
538  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
539  if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
540  VM_STACK_ENV_WRITE(ep, index, v);
541  }
542  else {
543  vm_env_write_slowpath(ep, index, v);
544  }
545 }
546 
547 void
548 rb_vm_env_write(const VALUE *ep, int index, VALUE v)
549 {
550  vm_env_write(ep, index, v);
551 }
552 
553 VALUE
554 rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
555 {
556  if (block_handler == VM_BLOCK_HANDLER_NONE) {
557  return Qnil;
558  }
559  else {
560  switch (vm_block_handler_type(block_handler)) {
561  case block_handler_type_iseq:
562  case block_handler_type_ifunc:
563  return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
564  case block_handler_type_symbol:
565  return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
566  case block_handler_type_proc:
567  return VM_BH_TO_PROC(block_handler);
568  default:
569  VM_UNREACHABLE(rb_vm_bh_to_procval);
570  }
571  }
572 }
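/* Illustrative mapping (comment added for exposition): a literal block such
 * as `m { ... }` arrives here as an iseq/ifunc handler and is wrapped in a
 * fresh Proc; `m(&:sym)` arrives as a symbol handler and goes through
 * rb_sym_to_proc(); `m(&pr)` already carries a Proc and is returned as-is. */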
573 
574 /* svar */
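/* svar holds a frame's special variables: VM_SVAR_LASTLINE backs $_ and
 * VM_SVAR_BACKREF backs $~; slots from VM_SVAR_EXTRA_START onward live in
 * the `others` array (summary comment; the slot layout is defined in
 * vm_core.h). */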
575 
576 #if VM_CHECK_MODE > 0
577 static int
578 vm_svar_valid_p(VALUE svar)
579 {
580  if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
581  switch (imemo_type(svar)) {
582  case imemo_svar:
583  case imemo_cref:
584  case imemo_ment:
585  return TRUE;
586  default:
587  break;
588  }
589  }
590  rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
591  return FALSE;
592 }
593 #endif
594 
595 static inline struct vm_svar *
596 lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
597 {
598  VALUE svar;
599 
600  if (lep && (ec == NULL || ec->root_lep != lep)) {
601  svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
602  }
603  else {
604  svar = ec->root_svar;
605  }
606 
607  VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
608 
609  return (struct vm_svar *)svar;
610 }
611 
612 static inline void
613 lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
614 {
615  VM_ASSERT(vm_svar_valid_p((VALUE)svar));
616 
617  if (lep && (ec == NULL || ec->root_lep != lep)) {
618  vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
619  }
620  else {
621  RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
622  }
623 }
624 
625 static VALUE
626 lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
627 {
628  const struct vm_svar *svar = lep_svar(ec, lep);
629 
630  if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
631 
632  switch (key) {
633  case VM_SVAR_LASTLINE:
634  return svar->lastline;
635  case VM_SVAR_BACKREF:
636  return svar->backref;
637  default: {
638  const VALUE ary = svar->others;
639 
640  if (NIL_P(ary)) {
641  return Qnil;
642  }
643  else {
644  return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
645  }
646  }
647  }
648 }
649 
650 static struct vm_svar *
651 svar_new(VALUE obj)
652 {
653  struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
654  *((VALUE *)&svar->lastline) = Qnil;
655  *((VALUE *)&svar->backref) = Qnil;
656  *((VALUE *)&svar->others) = Qnil;
657 
658  return svar;
659 }
660 
661 static void
662 lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
663 {
664  struct vm_svar *svar = lep_svar(ec, lep);
665 
666  if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
667  lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
668  }
669 
670  switch (key) {
671  case VM_SVAR_LASTLINE:
672  RB_OBJ_WRITE(svar, &svar->lastline, val);
673  return;
674  case VM_SVAR_BACKREF:
675  RB_OBJ_WRITE(svar, &svar->backref, val);
676  return;
677  default: {
678  VALUE ary = svar->others;
679 
680  if (NIL_P(ary)) {
681  RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
682  }
683  rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
684  }
685  }
686 }
687 
688 static inline VALUE
689 vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
690 {
691  VALUE val;
692 
693  if (type == 0) {
694  val = lep_svar_get(ec, lep, key);
695  }
696  else {
697  VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
698 
699  if (type & 0x01) {
700  switch (type >> 1) {
701  case '&':
702  val = rb_reg_last_match(backref);
703  break;
704  case '`':
705  val = rb_reg_match_pre(backref);
706  break;
707  case '\'':
708  val = rb_reg_match_post(backref);
709  break;
710  case '+':
711  val = rb_reg_match_last(backref);
712  break;
713  default:
714  rb_bug("unexpected back-ref");
715  }
716  }
717  else {
718  val = rb_reg_nth_match((int)(type >> 1), backref);
719  }
720  }
721  return val;
722 }
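/* Sketch of the `type` encoding, read off the branches above: type == 0
 * reads an svar slot selected by `key`; an odd type carries a character, so
 * (type >> 1) selects $& ('&'), $` ('`'), $' ('\''), or $+ ('+'); an even
 * type selects the numbered group $N with N == type >> 1. */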
723 
724 static inline VALUE
725 vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
726 {
727  VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
728  int nth = 0;
729 
730  if (type & 0x01) {
731  switch (type >> 1) {
732  case '&':
733  case '`':
734  case '\'':
735  break;
736  case '+':
737  return rb_reg_last_defined(backref);
738  default:
739  rb_bug("unexpected back-ref");
740  }
741  }
742  else {
743  nth = (int)(type >> 1);
744  }
745  return rb_reg_nth_defined(nth, backref);
746 }
747 
748 PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
749 static rb_callable_method_entry_t *
750 check_method_entry(VALUE obj, int can_be_svar)
751 {
752  if (obj == Qfalse) return NULL;
753 
754 #if VM_CHECK_MODE > 0
755  if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
756 #endif
757 
758  switch (imemo_type(obj)) {
759  case imemo_ment:
760  return (rb_callable_method_entry_t *)obj;
761  case imemo_cref:
762  return NULL;
763  case imemo_svar:
764  if (can_be_svar) {
765  return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
766  }
767  default:
768 #if VM_CHECK_MODE > 0
769  rb_bug("check_method_entry: svar should not be there:");
770 #endif
771  return NULL;
772  }
773 }
774 
775 const rb_callable_method_entry_t *
776 rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
777 {
778 const VALUE *ep = cfp->ep;
779 rb_callable_method_entry_t *me;
780 
781  while (!VM_ENV_LOCAL_P(ep)) {
782  if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
783  ep = VM_ENV_PREV_EP(ep);
784  }
785 
786  return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
787 }
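/* Note (added for exposition): block frames share their method's local
 * environment through the prev-EP chain, so walking VM_ENV_PREV_EP() until
 * VM_ENV_LOCAL_P() reaches the frame that actually carries the method
 * entry. */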
788 
789 static const rb_iseq_t *
790 method_entry_iseqptr(const rb_callable_method_entry_t *me)
791 {
792  switch (me->def->type) {
793  case VM_METHOD_TYPE_ISEQ:
794  return me->def->body.iseq.iseqptr;
795  default:
796  return NULL;
797  }
798 }
799 
800 static rb_cref_t *
801 method_entry_cref(const rb_callable_method_entry_t *me)
802 {
803  switch (me->def->type) {
804  case VM_METHOD_TYPE_ISEQ:
805  return me->def->body.iseq.cref;
806  default:
807  return NULL;
808  }
809 }
810 
811 #if VM_CHECK_MODE == 0
812 PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
813 #endif
814 static rb_cref_t *
815 check_cref(VALUE obj, int can_be_svar)
816 {
817  if (obj == Qfalse) return NULL;
818 
819 #if VM_CHECK_MODE > 0
820  if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
821 #endif
822 
823  switch (imemo_type(obj)) {
824  case imemo_ment:
825  return method_entry_cref((rb_callable_method_entry_t *)obj);
826  case imemo_cref:
827  return (rb_cref_t *)obj;
828  case imemo_svar:
829  if (can_be_svar) {
830  return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
831  }
832  default:
833 #if VM_CHECK_MODE > 0
834 rb_bug("check_cref: svar should not be there");
835 #endif
836  return NULL;
837  }
838 }
839 
840 static inline rb_cref_t *
841 vm_env_cref(const VALUE *ep)
842 {
843  rb_cref_t *cref;
844 
845  while (!VM_ENV_LOCAL_P(ep)) {
846  if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
847  ep = VM_ENV_PREV_EP(ep);
848  }
849 
850  return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
851 }
852 
853 static int
854 is_cref(const VALUE v, int can_be_svar)
855 {
856  if (RB_TYPE_P(v, T_IMEMO)) {
857  switch (imemo_type(v)) {
858  case imemo_cref:
859  return TRUE;
860  case imemo_svar:
861  if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
862  default:
863  break;
864  }
865  }
866  return FALSE;
867 }
868 
869 static int
870 vm_env_cref_by_cref(const VALUE *ep)
871 {
872  while (!VM_ENV_LOCAL_P(ep)) {
873  if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
874  ep = VM_ENV_PREV_EP(ep);
875  }
876  return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
877 }
878 
879 static rb_cref_t *
880 cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
881 {
882  const VALUE v = *vptr;
883  rb_cref_t *cref, *new_cref;
884 
885  if (RB_TYPE_P(v, T_IMEMO)) {
886  switch (imemo_type(v)) {
887  case imemo_cref:
888  cref = (rb_cref_t *)v;
889  new_cref = vm_cref_dup(cref);
890  if (parent) {
891  RB_OBJ_WRITE(parent, vptr, new_cref);
892  }
893  else {
894  VM_FORCE_WRITE(vptr, (VALUE)new_cref);
895  }
896  return (rb_cref_t *)new_cref;
897  case imemo_svar:
898  if (can_be_svar) {
899  return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
900  }
901  /* fall through */
902  case imemo_ment:
903  rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
904  default:
905  break;
906  }
907  }
908  return NULL;
909 }
910 
911 static rb_cref_t *
912 vm_cref_replace_with_duplicated_cref(const VALUE *ep)
913 {
914  if (vm_env_cref_by_cref(ep)) {
915  rb_cref_t *cref;
916  VALUE envval;
917 
918  while (!VM_ENV_LOCAL_P(ep)) {
919  envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
920  if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
921  return cref;
922  }
923  ep = VM_ENV_PREV_EP(ep);
924  }
925  envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
926  return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
927  }
928  else {
929  rb_bug("vm_cref_dup: unreachable");
930  }
931 }
932 
933 static rb_cref_t *
934 vm_get_cref(const VALUE *ep)
935 {
936  rb_cref_t *cref = vm_env_cref(ep);
937 
938  if (cref != NULL) {
939  return cref;
940  }
941  else {
942  rb_bug("vm_get_cref: unreachable");
943  }
944 }
945 
946 rb_cref_t *
947 rb_vm_get_cref(const VALUE *ep)
948 {
949  return vm_get_cref(ep);
950 }
951 
952 static rb_cref_t *
953 vm_ec_cref(const rb_execution_context_t *ec)
954 {
955  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
956 
957  if (cfp == NULL) {
958  return NULL;
959  }
960  return vm_get_cref(cfp->ep);
961 }
962 
963 static const rb_cref_t *
964 vm_get_const_key_cref(const VALUE *ep)
965 {
966  const rb_cref_t *cref = vm_get_cref(ep);
967  const rb_cref_t *key_cref = cref;
968 
969  while (cref) {
970  if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
971  RCLASS_EXT(CREF_CLASS(cref))->cloned) {
972  return key_cref;
973  }
974  cref = CREF_NEXT(cref);
975  }
976 
977  /* does not include singleton class */
978  return NULL;
979 }
980 
981 void
982 rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
983 {
984  rb_cref_t *new_cref;
985 
986  while (cref) {
987  if (CREF_CLASS(cref) == old_klass) {
988  new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
989  *new_cref_ptr = new_cref;
990  return;
991  }
992  new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
993  cref = CREF_NEXT(cref);
994  *new_cref_ptr = new_cref;
995  new_cref_ptr = &new_cref->next;
996  }
997  *new_cref_ptr = NULL;
998 }
999 
1000 static rb_cref_t *
1001 vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1002 {
1003  rb_cref_t *prev_cref = NULL;
1004 
1005  if (ep) {
1006  prev_cref = vm_env_cref(ep);
1007  }
1008  else {
1009  rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1010 
1011  if (cfp) {
1012  prev_cref = vm_env_cref(cfp->ep);
1013  }
1014  }
1015 
1016  return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1017 }
1018 
1019 static inline VALUE
1020 vm_get_cbase(const VALUE *ep)
1021 {
1022  const rb_cref_t *cref = vm_get_cref(ep);
1023 
1024  return CREF_CLASS_FOR_DEFINITION(cref);
1025 }
1026 
1027 static inline VALUE
1028 vm_get_const_base(const VALUE *ep)
1029 {
1030  const rb_cref_t *cref = vm_get_cref(ep);
1031 
1032  while (cref) {
1033  if (!CREF_PUSHED_BY_EVAL(cref)) {
1034  return CREF_CLASS_FOR_DEFINITION(cref);
1035  }
1036  cref = CREF_NEXT(cref);
1037  }
1038 
1039  return Qundef;
1040 }
1041 
1042 static inline void
1043 vm_check_if_namespace(VALUE klass)
1044 {
1045  if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1046  rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1047  }
1048 }
1049 
1050 static inline void
1051 vm_ensure_not_refinement_module(VALUE self)
1052 {
1053  if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1054  rb_warn("not defined at the refinement, but at the outer class/module");
1055  }
1056 }
1057 
1058 static inline VALUE
1059 vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1060 {
1061  return klass;
1062 }
1063 
1064 static inline VALUE
1065 vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1066 {
1067  void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1068  VALUE val;
1069 
1070  if (NIL_P(orig_klass) && allow_nil) {
1071  /* in current lexical scope */
1072  const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1073  const rb_cref_t *cref;
1074  VALUE klass = Qnil;
1075 
1076  while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1077  root_cref = CREF_NEXT(root_cref);
1078  }
1079  cref = root_cref;
1080  while (cref && CREF_NEXT(cref)) {
1081  if (CREF_PUSHED_BY_EVAL(cref)) {
1082  klass = Qnil;
1083  }
1084  else {
1085  klass = CREF_CLASS(cref);
1086  }
1087  cref = CREF_NEXT(cref);
1088 
1089  if (!NIL_P(klass)) {
1090  VALUE av, am = 0;
1091  rb_const_entry_t *ce;
1092  search_continue:
1093  if ((ce = rb_const_lookup(klass, id))) {
1094  rb_const_warn_if_deprecated(ce, klass, id);
1095  val = ce->value;
1096  if (UNDEF_P(val)) {
1097  if (am == klass) break;
1098  am = klass;
1099  if (is_defined) return 1;
1100  if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1101  rb_autoload_load(klass, id);
1102  goto search_continue;
1103  }
1104  else {
1105  if (is_defined) {
1106  return 1;
1107  }
1108  else {
1109  if (UNLIKELY(!rb_ractor_main_p())) {
1110  if (!rb_ractor_shareable_p(val)) {
1111  rb_raise(rb_eRactorIsolationError,
1112  "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1113  }
1114  }
1115  return val;
1116  }
1117  }
1118  }
1119  }
1120  }
1121 
1122  /* search self */
1123  if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1124  klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1125  }
1126  else {
1127  klass = CLASS_OF(ec->cfp->self);
1128  }
1129 
1130  if (is_defined) {
1131  return rb_const_defined(klass, id);
1132  }
1133  else {
1134  return rb_const_get(klass, id);
1135  }
1136  }
1137  else {
1138  vm_check_if_namespace(orig_klass);
1139  if (is_defined) {
1140  return rb_public_const_defined_from(orig_klass, id);
1141  }
1142  else {
1143  return rb_public_const_get_from(orig_klass, id);
1144  }
1145  }
1146 }
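/* Illustrative behavior (not from the original source): with
 *   module M; X = 1; class C; def f; X; end; end; end
 * reading X inside M::C walks the lexical crefs first (C, then M, where
 * M::X is found); only when no lexical scope defines the constant does it
 * fall back to rb_const_get() on the innermost cref class, which searches
 * that class's ancestors. */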
1147 
1148 VALUE
1149 rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1150 {
1151  return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1152 }
1153 
1154 static inline VALUE
1155 vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1156 {
1157  VALUE val = Qnil;
1158  int idx = 0;
1159  int allow_nil = TRUE;
1160  if (segments[0] == idNULL) {
1161  val = rb_cObject;
1162  idx++;
1163  allow_nil = FALSE;
1164  }
1165  while (segments[idx]) {
1166  ID id = segments[idx++];
1167  val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1168  allow_nil = FALSE;
1169  }
1170  return val;
1171 }
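/* Sketch of the operand encoding (derived from the loop above): a path such
 * as Foo::Bar::Baz arrives as a zero-terminated ID list {Foo, Bar, Baz, 0},
 * and an absolute path like ::Foo is marked by a leading idNULL so the
 * search starts at rb_cObject with allow_nil disabled. */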
1172 
1173 
1174 static inline VALUE
1175 vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1176 {
1177  VALUE klass;
1178 
1179  if (!cref) {
1180  rb_bug("vm_get_cvar_base: no cref");
1181  }
1182 
1183  while (CREF_NEXT(cref) &&
1184  (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1185  CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1186  cref = CREF_NEXT(cref);
1187  }
1188  if (top_level_raise && !CREF_NEXT(cref)) {
1189  rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1190  }
1191 
1192  klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1193 
1194  if (NIL_P(klass)) {
1195  rb_raise(rb_eTypeError, "no class variables available");
1196  }
1197  return klass;
1198 }
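/* For example (illustrative): `@@x = 1` at the top level reaches the last
 * cref (no CREF_NEXT()) and raises "class variable access from toplevel",
 * while inside `class C` the singleton and eval-pushed scopes are skipped
 * so @@x attaches to C itself. */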
1199 
1200 ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1201 static inline void
1202 fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1203 {
1204  if (is_attr) {
1205  vm_cc_attr_index_set(cc, index, shape_id);
1206  }
1207  else {
1208  vm_ic_attr_index_set(iseq, ic, index, shape_id);
1209  }
1210 }
1211 
1212 #define ractor_incidental_shareable_p(cond, val) \
1213  (!(cond) || rb_ractor_shareable_p(val))
1214 #define ractor_object_incidental_shareable_p(obj, val) \
1215  ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1216 
1217 #define ATTR_INDEX_NOT_SET (attr_index_t)-1
1218 
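/* Overview of the shape-based ivar read cache (comment added for
 * exposition): each call site caches a (shape_id, attr_index) pair. When
 * the receiver's current shape matches the cached shape_id, the read is a
 * direct ivar_list[index] load; on a miss, the slow path consults the shape
 * tree and re-fills the cache. */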
1219 ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1220 static inline VALUE
1221 vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1222 {
1223 #if OPT_IC_FOR_IVAR
1224  VALUE val = Qundef;
1225  shape_id_t shape_id;
1226  VALUE * ivar_list;
1227 
1228  if (SPECIAL_CONST_P(obj)) {
1229  return default_value;
1230  }
1231 
1232 #if SHAPE_IN_BASIC_FLAGS
1233  shape_id = RBASIC_SHAPE_ID(obj);
1234 #endif
1235 
1236  switch (BUILTIN_TYPE(obj)) {
1237  case T_OBJECT:
1238  ivar_list = ROBJECT_IVPTR(obj);
1239  VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1240 
1241 #if !SHAPE_IN_BASIC_FLAGS
1242  shape_id = ROBJECT_SHAPE_ID(obj);
1243 #endif
1244  break;
1245  case T_CLASS:
1246  case T_MODULE:
1247  {
1248  if (UNLIKELY(!rb_ractor_main_p())) {
1249  // For two reasons we can only use the fast path on the main
1250  // ractor.
1251  // First, only the main ractor is allowed to set ivars on classes
1252  // and modules. So we can skip locking.
1253  // Second, other ractors need to check the shareability of the
1254  // values returned from the class ivars.
1255 
1256  if (default_value == Qundef) { // defined?
1257  return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1258  }
1259  else {
1260  goto general_path;
1261  }
1262  }
1263 
1264  ivar_list = RCLASS_IVPTR(obj);
1265 
1266 #if !SHAPE_IN_BASIC_FLAGS
1267  shape_id = RCLASS_SHAPE_ID(obj);
1268 #endif
1269 
1270  break;
1271  }
1272  default:
1273  if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1274  struct gen_ivtbl *ivtbl;
1275  rb_gen_ivtbl_get(obj, id, &ivtbl);
1276 #if !SHAPE_IN_BASIC_FLAGS
1277  shape_id = ivtbl->shape_id;
1278 #endif
1279  ivar_list = ivtbl->as.shape.ivptr;
1280  }
1281  else {
1282  return default_value;
1283  }
1284  }
1285 
1286  shape_id_t cached_id;
1287  attr_index_t index;
1288 
1289  if (is_attr) {
1290  vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1291  }
1292  else {
1293  vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1294  }
1295 
1296  if (LIKELY(cached_id == shape_id)) {
1297  RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1298 
1299  if (index == ATTR_INDEX_NOT_SET) {
1300  return default_value;
1301  }
1302 
1303  val = ivar_list[index];
1304 #if USE_DEBUG_COUNTER
1305  RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1306 
1307  if (RB_TYPE_P(obj, T_OBJECT)) {
1308  RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1309  }
1310 #endif
1311  RUBY_ASSERT(!UNDEF_P(val));
1312  }
1313  else { // cache miss case
1314 #if USE_DEBUG_COUNTER
1315  if (is_attr) {
1316  if (cached_id != INVALID_SHAPE_ID) {
1317  RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1318  }
1319  else {
1320  RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1321  }
1322  }
1323  else {
1324  if (cached_id != INVALID_SHAPE_ID) {
1325  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1326  }
1327  else {
1328  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1329  }
1330  }
1331  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1332 
1333  if (RB_TYPE_P(obj, T_OBJECT)) {
1334  RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1335  }
1336 #endif
1337 
1338  if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
1339  st_table *table = NULL;
1340  switch (BUILTIN_TYPE(obj)) {
1341  case T_CLASS:
1342  case T_MODULE:
1343  table = (st_table *)RCLASS_IVPTR(obj);
1344  break;
1345 
1346  case T_OBJECT:
1347  table = ROBJECT_IV_HASH(obj);
1348  break;
1349 
1350  default: {
1351  struct gen_ivtbl *ivtbl;
1352  if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
1353  table = ivtbl->as.complex.table;
1354  }
1355  break;
1356  }
1357  }
1358 
1359  if (!table || !st_lookup(table, id, &val)) {
1360  val = default_value;
1361  }
1362  }
1363  else {
1364  shape_id_t previous_cached_id = cached_id;
1365  if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1366 // rb_shape_get_iv_index_with_hint() may update cached_id; re-fill
1367 // this call site's inline cache when it does.
1368  if (cached_id != previous_cached_id) {
1369  fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1370  }
1371 
1372  if (index == ATTR_INDEX_NOT_SET) {
1373  val = default_value;
1374  }
1375  else {
1376  // We fetched the ivar list above
1377  val = ivar_list[index];
1378  RUBY_ASSERT(!UNDEF_P(val));
1379  }
1380  }
1381  else {
1382  if (is_attr) {
1383  vm_cc_attr_index_initialize(cc, shape_id);
1384  }
1385  else {
1386  vm_ic_attr_index_initialize(ic, shape_id);
1387  }
1388 
1389  val = default_value;
1390  }
1391  }
1392 
1393  }
1394 
1395  if (!UNDEF_P(default_value)) {
1396  RUBY_ASSERT(!UNDEF_P(val));
1397  }
1398 
1399  return val;
1400 
1401 general_path:
1402 #endif /* OPT_IC_FOR_IVAR */
1403  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1404 
1405  if (is_attr) {
1406  return rb_attr_get(obj, id);
1407  }
1408  else {
1409  return rb_ivar_get(obj, id);
1410  }
1411 }
1412 
1413 static void
1414 populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415 {
1416  RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1417 
1418  // Cache population code
1419  if (is_attr) {
1420  vm_cc_attr_index_set(cc, index, next_shape_id);
1421  }
1422  else {
1423  vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424  }
1425 }
1426 
1427 ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428 NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429 NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430 
1431 static VALUE
1432 vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1433 {
1434 #if OPT_IC_FOR_IVAR
1435  RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1436 
1437  if (BUILTIN_TYPE(obj) == T_OBJECT) {
1438  rb_check_frozen(obj);
1439 
1440  attr_index_t index = rb_obj_ivar_set(obj, id, val);
1441 
1442  shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
1443 
1444  if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
1445  populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1446  }
1447 
1448  RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1449  return val;
1450  }
1451 #endif
1452  return rb_ivar_set(obj, id, val);
1453 }
1454 
1455 static VALUE
1456 vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1457 {
1458  return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1459 }
1460 
1461 static VALUE
1462 vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1463 {
1464  return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1465 }
1466 
1467 NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1468 static VALUE
1469 vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1470 {
1471 #if SHAPE_IN_BASIC_FLAGS
1472  shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1473 #else
1474  shape_id_t shape_id = rb_generic_shape_id(obj);
1475 #endif
1476 
1477  struct gen_ivtbl *ivtbl = 0;
1478 
1479  // Cache hit case
1480  if (shape_id == dest_shape_id) {
1481  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1482  }
1483  else if (dest_shape_id != INVALID_SHAPE_ID) {
1484  rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1485  rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1486 
1487  if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1488  RUBY_ASSERT(index < dest_shape->capacity);
1489  }
1490  else {
1491  return Qundef;
1492  }
1493  }
1494  else {
1495  return Qundef;
1496  }
1497 
1498  rb_gen_ivtbl_get(obj, 0, &ivtbl);
1499 
1500  if (shape_id != dest_shape_id) {
1501 #if SHAPE_IN_BASIC_FLAGS
1502  RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1503 #else
1504  ivtbl->shape_id = dest_shape_id;
1505 #endif
1506  }
1507 
1508  RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);
1509 
1510  RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1511 
1512  return val;
1513 }
1514 
1515 static inline VALUE
1516 vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1517 {
1518 #if OPT_IC_FOR_IVAR
1519  switch (BUILTIN_TYPE(obj)) {
1520  case T_OBJECT:
1521  {
1522  VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1523 
1524  shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
1525  RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1526 
1527  if (LIKELY(shape_id == dest_shape_id)) {
1528  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1529  VM_ASSERT(!rb_ractor_shareable_p(obj));
1530  }
1531  else if (dest_shape_id != INVALID_SHAPE_ID) {
1532  rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1533  rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1534  shape_id_t source_shape_id = dest_shape->parent_id;
1535 
1536  if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1537  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1538 
1539  ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
1540 
1541  RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
1542  RUBY_ASSERT(index < dest_shape->capacity);
1543  }
1544  else {
1545  break;
1546  }
1547  }
1548  else {
1549  break;
1550  }
1551 
1552  VALUE *ptr = ROBJECT_IVPTR(obj);
1553 
1554  RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
1555  RB_OBJ_WRITE(obj, &ptr[index], val);
1556 
1557  RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1558  RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1559  return val;
1560  }
1561  break;
1562  case T_CLASS:
1563  case T_MODULE:
1564  RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1565  default:
1566  break;
1567  }
1568 
1569  return Qundef;
1570 #endif /* OPT_IC_FOR_IVAR */
1571 }
1572 
1573 static VALUE
1574 update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1575 {
1576  VALUE defined_class = 0;
1577  VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1578 
1579  if (RB_TYPE_P(defined_class, T_ICLASS)) {
1580  defined_class = RBASIC(defined_class)->klass;
1581  }
1582 
1583  struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1584  if (!rb_cvc_tbl) {
1585  rb_bug("the cvc table should be set");
1586  }
1587 
1588  VALUE ent_data;
1589  if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1590  rb_bug("should have cvar cache entry");
1591  }
1592 
1593  struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1594 
1595  ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1596  ent->cref = cref;
1597  ic->entry = ent;
1598 
1599  RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1600  RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1601  RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1602  RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1603 
1604  return cvar_value;
1605 }
1606 
1607 static inline VALUE
1608 vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1609 {
1610  const rb_cref_t *cref;
1611  cref = vm_get_cref(GET_EP());
1612 
1613  if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1614  RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1615 
1616  VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1617  RUBY_ASSERT(!UNDEF_P(v));
1618 
1619  return v;
1620  }
1621 
1622  VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1623 
1624  return update_classvariable_cache(iseq, klass, id, cref, ic);
1625 }
1626 
1627 VALUE
1628 rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1629 {
1630  return vm_getclassvariable(iseq, cfp, id, ic);
1631 }
1632 
1633 static inline void
1634 vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1635 {
1636  const rb_cref_t *cref;
1637  cref = vm_get_cref(GET_EP());
1638 
1639  if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1640  RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1641 
1642  rb_class_ivar_set(ic->entry->class_value, id, val);
1643  return;
1644  }
1645 
1646  VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1647 
1648  rb_cvar_set(klass, id, val);
1649 
1650  update_classvariable_cache(iseq, klass, id, cref, ic);
1651 }
1652 
1653 void
1654 rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1655 {
1656  vm_setclassvariable(iseq, cfp, id, val, ic);
1657 }
1658 
1659 static inline VALUE
1660 vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1661 {
1662  return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1663 }
1664 
1665 static inline void
1666 vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1667 {
1668 if (RB_SPECIAL_CONST_P(obj)) {
1669 rb_error_frozen_object(obj);
1670 return;
1671  }
1672 
1673  shape_id_t dest_shape_id;
1674  attr_index_t index;
1675  vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1676 
1677  if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1678  switch (BUILTIN_TYPE(obj)) {
1679  case T_OBJECT:
1680  case T_CLASS:
1681  case T_MODULE:
1682  break;
1683  default:
1684  if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1685  return;
1686  }
1687  }
1688  vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1689  }
1690 }
1691 
1692 void
1693 rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1694 {
1695  vm_setinstancevariable(iseq, obj, id, val, ic);
1696 }
1697 
1698 static VALUE
1699 vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1700 {
1701  /* continue throw */
1702 
1703  if (FIXNUM_P(err)) {
1704  ec->tag->state = RUBY_TAG_FATAL;
1705  }
1706  else if (SYMBOL_P(err)) {
1707  ec->tag->state = TAG_THROW;
1708  }
1709  else if (THROW_DATA_P(err)) {
1710  ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1711  }
1712  else {
1713  ec->tag->state = TAG_RAISE;
1714  }
1715  return err;
1716 }
1717 
1718 static VALUE
1719 vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1720  const int flag, const VALUE throwobj)
1721 {
1722  const rb_control_frame_t *escape_cfp = NULL;
1723  const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1724 
1725  if (flag != 0) {
1726  /* do nothing */
1727  }
1728  else if (state == TAG_BREAK) {
1729  int is_orphan = 1;
1730  const VALUE *ep = GET_EP();
1731  const rb_iseq_t *base_iseq = GET_ISEQ();
1732  escape_cfp = reg_cfp;
1733 
1734  while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1735  if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1736  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1737  ep = escape_cfp->ep;
1738  base_iseq = escape_cfp->iseq;
1739  }
1740  else {
1741  ep = VM_ENV_PREV_EP(ep);
1742  base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1743  escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1744  VM_ASSERT(escape_cfp->iseq == base_iseq);
1745  }
1746  }
1747 
1748  if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1749  /* lambda{... break ...} */
1750  is_orphan = 0;
1751  state = TAG_RETURN;
1752  }
1753  else {
1754  ep = VM_ENV_PREV_EP(ep);
1755 
1756  while (escape_cfp < eocfp) {
1757  if (escape_cfp->ep == ep) {
1758  const rb_iseq_t *const iseq = escape_cfp->iseq;
1759  const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1760  const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1761  unsigned int i;
1762 
1763  if (!ct) break;
1764  for (i=0; i < ct->size; i++) {
1765  const struct iseq_catch_table_entry *const entry =
1766  UNALIGNED_MEMBER_PTR(ct, entries[i]);
1767 
1768  if (entry->type == CATCH_TYPE_BREAK &&
1769  entry->iseq == base_iseq &&
1770  entry->start < epc && entry->end >= epc) {
1771  if (entry->cont == epc) { /* found! */
1772  is_orphan = 0;
1773  }
1774  break;
1775  }
1776  }
1777  break;
1778  }
1779 
1780  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1781  }
1782  }
1783 
1784  if (is_orphan) {
1785  rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1786  }
1787  }
1788  else if (state == TAG_RETRY) {
1789  const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1790 
1791  escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1792  }
1793  else if (state == TAG_RETURN) {
1794  const VALUE *current_ep = GET_EP();
1795  const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1796  int in_class_frame = 0;
1797  int toplevel = 1;
1798  escape_cfp = reg_cfp;
1799 
1800  // find target_lep, target_ep
1801  while (!VM_ENV_LOCAL_P(ep)) {
1802  if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1803  target_ep = ep;
1804  }
1805  ep = VM_ENV_PREV_EP(ep);
1806  }
1807  target_lep = ep;
1808 
1809  while (escape_cfp < eocfp) {
1810  const VALUE *lep = VM_CF_LEP(escape_cfp);
1811 
1812  if (!target_lep) {
1813  target_lep = lep;
1814  }
1815 
1816  if (lep == target_lep &&
1817  VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1818  ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1819  in_class_frame = 1;
1820  target_lep = 0;
1821  }
1822 
1823  if (lep == target_lep) {
1824  if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1825  toplevel = 0;
1826  if (in_class_frame) {
1827  /* lambda {class A; ... return ...; end} */
1828  goto valid_return;
1829  }
1830  else {
1831  const VALUE *tep = current_ep;
1832 
1833  while (target_lep != tep) {
1834  if (escape_cfp->ep == tep) {
1835  /* in lambda */
1836  if (tep == target_ep) {
1837  goto valid_return;
1838  }
1839  else {
1840  goto unexpected_return;
1841  }
1842  }
1843  tep = VM_ENV_PREV_EP(tep);
1844  }
1845  }
1846  }
1847  else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1848  switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1849  case ISEQ_TYPE_TOP:
1850  case ISEQ_TYPE_MAIN:
1851  if (toplevel) {
1852  if (in_class_frame) goto unexpected_return;
1853  if (target_ep == NULL) {
1854  goto valid_return;
1855  }
1856  else {
1857  goto unexpected_return;
1858  }
1859  }
1860  break;
1861  case ISEQ_TYPE_EVAL: {
1862  const rb_iseq_t *is = escape_cfp->iseq;
1863  enum rb_iseq_type t = ISEQ_BODY(is)->type;
1864  while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1865  if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1866  t = ISEQ_BODY(is)->type;
1867  }
1868  toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1869  break;
1870  }
1871  case ISEQ_TYPE_CLASS:
1872  toplevel = 0;
1873  break;
1874  default:
1875  break;
1876  }
1877  }
1878  }
1879 
1880  if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1881  if (target_ep == NULL) {
1882  goto valid_return;
1883  }
1884  else {
1885  goto unexpected_return;
1886  }
1887  }
1888 
1889  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1890  }
1891  unexpected_return:;
1892  rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1893 
1894  valid_return:;
1895  /* do nothing */
1896  }
1897  else {
1898 rb_bug("insns(throw): unsupported throw type");
1899  }
1900 
1901  ec->tag->state = state;
1902  return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1903 }
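/* Illustrative cases (not from the original source): `break` inside
 * `lambda { ... }` is rewritten to TAG_RETURN above and unwinds just the
 * lambda, while a `break` whose defining block frame is already gone (e.g.
 * a proc invoked after its method returned) is detected as an orphan and
 * raises LocalJumpError ("break from proc-closure"). */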
1904 
1905 static VALUE
1906 vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1907  rb_num_t throw_state, VALUE throwobj)
1908 {
1909  const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1910  const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1911 
1912  if (state != 0) {
1913  return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1914  }
1915  else {
1916  return vm_throw_continue(ec, throwobj);
1917  }
1918 }
1919 
1920 VALUE
1921 rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1922 {
1923  return vm_throw(ec, reg_cfp, throw_state, throwobj);
1924 }
1925 
1926 static inline void
1927 vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1928 {
1929  int is_splat = flag & 0x01;
1930  const VALUE *ptr;
1931  rb_num_t len;
1932  const VALUE obj = ary;
1933 
1934  if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1935  ary = obj;
1936  ptr = &ary;
1937  len = 1;
1938  }
1939  else {
1940  ptr = RARRAY_CONST_PTR(ary);
1941  len = (rb_num_t)RARRAY_LEN(ary);
1942  }
1943 
1944  if (num + is_splat == 0) {
1945  /* no space left on stack */
1946  }
1947  else if (flag & 0x02) {
1948 /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1949  rb_num_t i = 0, j;
1950 
1951  if (len < num) {
1952  for (i = 0; i < num - len; i++) {
1953  *cfp->sp++ = Qnil;
1954  }
1955  }
1956 
1957  for (j = 0; i < num; i++, j++) {
1958  VALUE v = ptr[len - j - 1];
1959  *cfp->sp++ = v;
1960  }
1961 
1962  if (is_splat) {
1963  *cfp->sp++ = rb_ary_new4(len - j, ptr);
1964  }
1965  }
1966  else {
1967  /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1968  if (is_splat) {
1969  if (num > len) {
1970  *cfp->sp++ = rb_ary_new();
1971  }
1972  else {
1973  *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1974  }
1975  }
1976 
1977  if (num > len) {
1978  rb_num_t i = 0;
1979  for (; i < num - len; i++) {
1980  *cfp->sp++ = Qnil;
1981  }
1982 
1983  for (rb_num_t j = 0; i < num; i++, j++) {
1984  *cfp->sp++ = ptr[len - j - 1];
1985  }
1986  }
1987  else {
1988  for (rb_num_t j = 0; j < num; j++) {
1989  *cfp->sp++ = ptr[num - j - 1];
1990  }
1991  }
1992  }
1993 
1994  RB_GC_GUARD(ary);
1995 }
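/* Sketch of the flag bits, read off the branches above: 0x01 requests a
 * trailing splat and 0x02 selects the "post" order. E.g. (illustrative)
 * `a, b = ary` expands with flag 0, `a, *b = ary` sets 0x01, and
 * `*a, b = ary` takes the post path with 0x02|0x01. */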
1996 
1997 static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1998 
1999 static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2000 
2001 static struct rb_class_cc_entries *
2002 vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2003 {
2004  struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
2005 #if VM_CHECK_MODE > 0
2006  ccs->debug_sig = ~(VALUE)ccs;
2007 #endif
2008  ccs->capa = 0;
2009  ccs->len = 0;
2010  ccs->cme = cme;
2011  METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2012  ccs->entries = NULL;
2013 
2014  rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2015  RB_OBJ_WRITTEN(klass, Qundef, cme);
2016  return ccs;
2017 }
2018 
2019 static void
2020 vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2021 {
2022  if (! vm_cc_markable(cc)) {
2023  return;
2024  }
2025 
2026  if (UNLIKELY(ccs->len == ccs->capa)) {
2027  if (ccs->capa == 0) {
2028  ccs->capa = 1;
2029  ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2030  }
2031  else {
2032  ccs->capa *= 2;
2033  REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2034  }
2035  }
2036  VM_ASSERT(ccs->len < ccs->capa);
2037 
2038  const int pos = ccs->len++;
2039  ccs->entries[pos].argc = vm_ci_argc(ci);
2040  ccs->entries[pos].flag = vm_ci_flag(ci);
2041  RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2042 
2043  if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2044  // for tuning
2045  // vm_mtbl_dump(klass, 0);
2046  }
2047 }
2048 
2049 #if VM_CHECK_MODE > 0
2050 void
2051 rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2052 {
2053  ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2054  for (int i=0; i<ccs->len; i++) {
2055  ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2056  ccs->entries[i].flag,
2057  ccs->entries[i].argc);
2058  rp(ccs->entries[i].cc);
2059  }
2060 }
2061 
2062 static int
2063 vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2064 {
2065  VM_ASSERT(vm_ccs_p(ccs));
2066  VM_ASSERT(ccs->len <= ccs->capa);
2067 
2068  for (int i=0; i<ccs->len; i++) {
2069  const struct rb_callcache *cc = ccs->entries[i].cc;
2070 
2071  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2072  VM_ASSERT(vm_cc_class_check(cc, klass));
2073  VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2074  VM_ASSERT(!vm_cc_super_p(cc));
2075  VM_ASSERT(!vm_cc_refinement_p(cc));
2076  }
2077  return TRUE;
2078 }
2079 #endif
2080 
2081 const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2082 
2083 static const struct rb_callcache *
2084 vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2085 {
2086  const ID mid = vm_ci_mid(ci);
2087  struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2088  struct rb_class_cc_entries *ccs = NULL;
2089  VALUE ccs_data;
2090 
2091  if (cc_tbl) {
2092  // CCS data is keyed on method id, so we don't need the method id
2093  // for doing comparisons in the `for` loop below.
2094  if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2095  ccs = (struct rb_class_cc_entries *)ccs_data;
2096  const int ccs_len = ccs->len;
2097 
2098  if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2099  rb_vm_ccs_free(ccs);
2100  rb_id_table_delete(cc_tbl, mid);
2101  ccs = NULL;
2102  }
2103  else {
2104  VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2105 
2106  // We already know the method id is correct because we had
2107  // to look up the ccs_data by method id. All we need to
2108  // compare is argc and flag
2109  unsigned int argc = vm_ci_argc(ci);
2110  unsigned int flag = vm_ci_flag(ci);
2111 
2112  for (int i=0; i<ccs_len; i++) {
2113  unsigned int ccs_ci_argc = ccs->entries[i].argc;
2114  unsigned int ccs_ci_flag = ccs->entries[i].flag;
2115  const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2116 
2117  VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2118 
2119  if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2120  RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2121 
2122  VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2123  VM_ASSERT(ccs_cc->klass == klass);
2124  VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2125 
2126  return ccs_cc;
2127  }
2128  }
2129  }
2130  }
2131  }
2132  else {
2133  cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
2134  }
2135 
2136  RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2137 
2138  const rb_callable_method_entry_t *cme;
2139 
2140  if (ccs) {
2141  cme = ccs->cme;
2142  cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2143 
2144  VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2145  }
2146  else {
2147  cme = rb_callable_method_entry(klass, mid);
2148  }
2149 
2150  VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2151 
2152  if (cme == NULL) {
2153  // undef or not found: can't cache the information
2154  VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2155  return &vm_empty_cc;
2156  }
2157 
2158  VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2159 
2160  METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2161 
2162  if (ccs == NULL) {
2163  VM_ASSERT(cc_tbl != NULL);
2164 
2165  if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2166  // rb_callable_method_entry() prepares ccs.
2167  ccs = (struct rb_class_cc_entries *)ccs_data;
2168  }
2169  else {
2170  // TODO: required?
2171  ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2172  }
2173  }
2174 
2175  cme = rb_check_overloaded_cme(cme, ci);
2176 
2177  const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2178  vm_ccs_push(klass, ccs, ci, cc);
2179 
2180  VM_ASSERT(vm_cc_cme(cc) != NULL);
2181  VM_ASSERT(cme->called_id == mid);
2182  VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2183 
2184  return cc;
2185 }
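/* Summary of the lookup above (informal):
 *
 *   klass -> RCLASS_CC_TBL(klass)       id table keyed by method id
 *         -> rb_class_cc_entries (ccs)  one per method id
 *         -> entries[i]                 linear scan on (argc, flag)
 *
 * A hit returns the cached cc; a miss resolves the method entry with
 * rb_callable_method_entry(), wraps it in a fresh cc via vm_cc_new(), and
 * records it with vm_ccs_push() for subsequent calls. */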
2186 
2187 const struct rb_callcache *
2188 rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2189 {
2190  const struct rb_callcache *cc;
2191 
2192  VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2193 
2194  RB_VM_LOCK_ENTER();
2195  {
2196  cc = vm_search_cc(klass, ci);
2197 
2198  VM_ASSERT(cc);
2199  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2200  VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2201  VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2202  VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2203  VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2204  }
2205  RB_VM_LOCK_LEAVE();
2206 
2207  return cc;
2208 }
2209 
2210 static const struct rb_callcache *
2211 vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2212 {
2213 #if USE_DEBUG_COUNTER
2214  const struct rb_callcache *old_cc = cd->cc;
2215 #endif
2216 
2217  const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2218 
2219 #if OPT_INLINE_METHOD_CACHE
2220  cd->cc = cc;
2221 
2222  const struct rb_callcache *empty_cc = &vm_empty_cc;
2223  if (cd_owner && cc != empty_cc) {
2224  RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2225  }
2226 
2227 #if USE_DEBUG_COUNTER
2228  if (!old_cc || old_cc == empty_cc) {
2229  // empty
2230  RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2231  }
2232  else if (old_cc == cc) {
2233  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2234  }
2235  else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2236  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2237  }
2238  else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2239  vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2240  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2241  }
2242  else {
2243  RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2244  }
2245 #endif
2246 #endif // OPT_INLINE_METHOD_CACHE
2247 
2248  VM_ASSERT(vm_cc_cme(cc) == NULL ||
2249  vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2250 
2251  return cc;
2252 }
2253 
2254 ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2255 static const struct rb_callcache *
2256 vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2257 {
2258  const struct rb_callcache *cc = cd->cc;
2259 
2260 #if OPT_INLINE_METHOD_CACHE
2261  if (LIKELY(vm_cc_class_check(cc, klass))) {
2262  if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2263  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2264  RB_DEBUG_COUNTER_INC(mc_inline_hit);
2265  VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2266  (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2267  vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2268 
2269  return cc;
2270  }
2271  RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2272  }
2273  else {
2274  RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2275  }
2276 #endif
2277 
2278  return vm_search_method_slowpath0(cd_owner, cd, klass);
2279 }
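/* The fast path trusts the per-call-site inline cache (cd->cc) and checks
 * only that the receiver class still matches and that the cached method
 * entry has not been invalidated; every other case falls back to the
 * global search above, which re-fills the inline cache under the VM lock. */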
2280 
2281 static const struct rb_callcache *
2282 vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2283 {
2284  VALUE klass = CLASS_OF(recv);
2285  VM_ASSERT(klass != Qfalse);
2286  VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2287 
2288  return vm_search_method_fastpath(cd_owner, cd, klass);
2289 }
2290 
2291 #if __has_attribute(transparent_union)
2292 typedef union {
2293  VALUE (*anyargs)(ANYARGS);
2294  VALUE (*f00)(VALUE);
2295  VALUE (*f01)(VALUE, VALUE);
2296  VALUE (*f02)(VALUE, VALUE, VALUE);
2297  VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2298  VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2299  VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300  VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301  VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302  VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303  VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2304  VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2305  VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2306  VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2307  VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2308  VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2309  VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2310  VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2311 } __attribute__((__transparent_union__)) cfunc_type;
2312 # define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2313 #else
2314 typedef VALUE (*cfunc_type)(ANYARGS);
2315 # define make_cfunc_type(f) (cfunc_type)(f)
2316 #endif
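/* With __transparent_union__ the compiler accepts any of the listed
 * function-pointer shapes where a cfunc_type is expected, so a call such as
 * the following needs no explicit cast (a usage sketch):
 *
 *   check_cfunc(me, rb_obj_equal);  // rb_obj_equal is VALUE (VALUE, VALUE)
 *
 * Without the attribute, make_cfunc_type() degrades to a plain cast to
 * VALUE (*)(ANYARGS). */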
2317 
2318 static inline int
2319 check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2320 {
2321  if (! me) {
2322  return false;
2323  }
2324  else {
2325  VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2326  VM_ASSERT(callable_method_entry_p(me));
2327  VM_ASSERT(me->def);
2328  if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2329  return false;
2330  }
2331  else {
2332 #if __has_attribute(transparent_union)
2333  return me->def->body.cfunc.func == func.anyargs;
2334 #else
2335  return me->def->body.cfunc.func == func;
2336 #endif
2337  }
2338  }
2339 }
2340 
2341 static inline int
2342 vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2343 {
2344  VM_ASSERT(iseq != NULL);
2345  const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2346  return check_cfunc(vm_cc_cme(cc), func);
2347 }
2348 
2349 #define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2350 #define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2351 
2352 #define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2353 
2354 static inline bool
2355 FIXNUM_2_P(VALUE a, VALUE b)
2356 {
2357  /* FIXNUM_P(a) && FIXNUM_P(b)
2358  * == ((a & 1) && (b & 1))
2359  * == a & b & 1 */
2360  SIGNED_VALUE x = a;
2361  SIGNED_VALUE y = b;
2362  SIGNED_VALUE z = x & y & 1;
2363  return z == 1;
2364 }
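/* Worked example: the Fixnum 3 is tagged as (3 << 1) | 1 == 0b0111 and 4 as
 * 0b1001; 0b0111 & 0b1001 & 1 == 1, so both are Fixnums. Any heap-object
 * VALUE is pointer-aligned, so its bit 0 is 0 and the AND yields 0. */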
2365 
2366 static inline bool
2367 FLONUM_2_P(VALUE a, VALUE b)
2368 {
2369 #if USE_FLONUM
2370  /* FLONUM_P(a) && FLONUM_P(b)
2371  * == ((a & 3) == 2) && ((b & 3) == 2)
2372  * == !(((a ^ 2) | (b ^ 2)) & 3)
2373  */
2374  SIGNED_VALUE x = a;
2375  SIGNED_VALUE y = b;
2376  SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2377  return !z;
2378 #else
2379  return false;
2380 #endif
2381 }
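/* Worked example: a flonum carries 0b10 in its low two bits, so x ^ 2
 * clears them; ((x ^ 2) | (y ^ 2)) & 3 is therefore 0 exactly when both a
 * and b carry the flonum tag. */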
2382 
2383 static VALUE
2384 opt_equality_specialized(VALUE recv, VALUE obj)
2385 {
2386  if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2387  goto compare_by_identity;
2388  }
2389  else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2390  goto compare_by_identity;
2391  }
2392  else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2393  goto compare_by_identity;
2394  }
2395  else if (SPECIAL_CONST_P(recv)) {
2396  // other special constants: not handled here, fall through to Qundef
2397  }
2398  else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2399  double a = RFLOAT_VALUE(recv);
2400  double b = RFLOAT_VALUE(obj);
2401 
2402 #if MSC_VERSION_BEFORE(1300)
2403  if (isnan(a)) {
2404  return Qfalse;
2405  }
2406  else if (isnan(b)) {
2407  return Qfalse;
2408  }
2409  else
2410 #endif
2411  return RBOOL(a == b);
2412  }
2413  else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2414  if (recv == obj) {
2415  return Qtrue;
2416  }
2417  else if (RB_TYPE_P(obj, T_STRING)) {
2418  return rb_str_eql_internal(obj, recv);
2419  }
2420  }
2421  return Qundef;
2422 
2423  compare_by_identity:
2424  return RBOOL(recv == obj);
2425 }
2426 
2427 static VALUE
2428 opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2429 {
2430  VM_ASSERT(cd_owner != NULL);
2431 
2432  VALUE val = opt_equality_specialized(recv, obj);
2433  if (!UNDEF_P(val)) return val;
2434 
2435  if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2436  return Qundef;
2437  }
2438  else {
2439  return RBOOL(recv == obj);
2440  }
2441 }
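/* Contract: a Qundef return from opt_equality (and from
 * opt_equality_specialized above) means "not decided here"; the caller must
 * fall back to a full dispatch of #==. The identity comparison is trusted
 * only while the receiver's #== is still rb_obj_equal. */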
2442 
2443 #undef EQ_UNREDEFINED_P
2444 
2445 static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2446 NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2447 
2448 static VALUE
2449 opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2450 {
2451  const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2452 
2453  if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2454  return RBOOL(recv == obj);
2455  }
2456  else {
2457  return Qundef;
2458  }
2459 }
2460 
2461 static VALUE
2462 opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2463 {
2464  VALUE val = opt_equality_specialized(recv, obj);
2465  if (!UNDEF_P(val)) {
2466  return val;
2467  }
2468  else {
2469  return opt_equality_by_mid_slowpath(recv, obj, mid);
2470  }
2471 }
2472 
2473 VALUE
2474 rb_equal_opt(VALUE obj1, VALUE obj2)
2475 {
2476  return opt_equality_by_mid(obj1, obj2, idEq);
2477 }
2478 
2479 VALUE
2480 rb_eql_opt(VALUE obj1, VALUE obj2)
2481 {
2482  return opt_equality_by_mid(obj1, obj2, idEqlP);
2483 }
2484 
2485 extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2486 extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2487 
2488 static VALUE
2489 check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2490 {
2491  switch (type) {
2492  case VM_CHECKMATCH_TYPE_WHEN:
2493  return pattern;
2494  case VM_CHECKMATCH_TYPE_RESCUE:
2495  if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2496  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2497  }
2498  /* fall through */
2499  case VM_CHECKMATCH_TYPE_CASE: {
2500  return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2501  }
2502  default:
2503  rb_bug("check_match: unreachable");
2504  }
2505 }
2506 
2507 
2508 #if MSC_VERSION_BEFORE(1300)
2509 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2510 #else
2511 #define CHECK_CMP_NAN(a, b) /* do nothing */
2512 #endif
2513 
2514 static inline VALUE
2515 double_cmp_lt(double a, double b)
2516 {
2517  CHECK_CMP_NAN(a, b);
2518  return RBOOL(a < b);
2519 }
2520 
2521 static inline VALUE
2522 double_cmp_le(double a, double b)
2523 {
2524  CHECK_CMP_NAN(a, b);
2525  return RBOOL(a <= b);
2526 }
2527 
2528 static inline VALUE
2529 double_cmp_gt(double a, double b)
2530 {
2531  CHECK_CMP_NAN(a, b);
2532  return RBOOL(a > b);
2533 }
2534 
2535 static inline VALUE
2536 double_cmp_ge(double a, double b)
2537 {
2538  CHECK_CMP_NAN(a, b);
2539  return RBOOL(a >= b);
2540 }
2541 
2542  // A copy of this function lives in vm_dump.c
2543 static inline VALUE *
2544 vm_base_ptr(const rb_control_frame_t *cfp)
2545 {
2546  const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2547 
2548  if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2549  VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2550 
2551  if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2552  int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2553  int params = ISEQ_BODY(cfp->iseq)->param.size;
2554 
2555  CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2556  bp += vm_ci_argc(ci);
2557  }
2558 
2559  if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2560  /* adjust `self' */
2561  bp += 1;
2562  }
2563 #if VM_DEBUG_BP_CHECK
2564  if (bp != cfp->bp_check) {
2565  ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2566  (long)(cfp->bp_check - GET_EC()->vm_stack),
2567  (long)(bp - GET_EC()->vm_stack));
2568  rb_bug("vm_base_ptr: unreachable");
2569  }
2570 #endif
2571  return bp;
2572  }
2573  else {
2574  return NULL;
2575  }
2576 }
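/* Example of the arithmetic above: with local_table_size == 2 and the
 * VM_ENV_DATA_SIZE env slots (cref_or_me, specval, type), bp lands at
 * prev_cfp->sp + 2 + VM_ENV_DATA_SIZE; method and bmethod frames add one
 * more slot for self. */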
2577 
2578 VALUE *
2579 rb_vm_base_ptr(const rb_control_frame_t *cfp)
2580 {
2581  return vm_base_ptr(cfp);
2582 }
2583 
2584 /* method call processes with call_info */
2585 
2586 #include "vm_args.c"
2587 
2588 static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2589 ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2590 static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2591 static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2592 static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2593 static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2594 static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2595 
2596 static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2597 
2598 static VALUE
2599 vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2600 {
2601  RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2602 
2603  return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2604 }
2605 
2606 static VALUE
2607 vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2608 {
2609  RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2610 
2611  const struct rb_callcache *cc = calling->cc;
2612  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2613  int param = ISEQ_BODY(iseq)->param.size;
2614  int local = ISEQ_BODY(iseq)->local_table_size;
2615  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2616 }
2617 
2618 bool
2619 rb_simple_iseq_p(const rb_iseq_t *iseq)
2620 {
2621  return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2622  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2623  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2624  ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2625  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2626  ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2627  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2628  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2629 }
2630 
2631 bool
2632 rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2633 {
2634  return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2635  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2636  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2637  ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2638  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2639  ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2640  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2641  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2642 }
2643 
2644 bool
2645 rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2646 {
2647  return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2648  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2649  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2650  ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2651  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2652  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2653  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2654 }
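/* Example signatures matched by the three predicates above:
 *
 *   rb_simple_iseq_p:         def m(a, b)
 *   rb_iseq_only_optparam_p:  def m(a, b = 1)
 *   rb_iseq_only_kwparam_p:   def m(a, k: 1)
 *
 * Other shapes (rest, post, kwrest, block, `...`) fall through to the
 * generic paths in vm_callee_setup_arg() below. */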
2655 
2656 #define ALLOW_HEAP_ARGV (-2)
2657 #define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
2658 
2659 static inline bool
2660 vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2661 {
2662  vm_check_canary(GET_EC(), cfp->sp);
2663  bool ret = false;
2664 
2665  if (!NIL_P(ary)) {
2666  const VALUE *ptr = RARRAY_CONST_PTR(ary);
2667  long len = RARRAY_LEN(ary);
2668  int argc = calling->argc;
2669 
2670  if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2671  /* Avoid SystemStackError when splatting large arrays by storing arguments in
2672  * a temporary array, instead of trying to keep the arguments on the VM stack.
2673  */
2674  VALUE *argv = cfp->sp - argc;
2675  VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2676  rb_ary_cat(argv_ary, argv, argc);
2677  rb_ary_cat(argv_ary, ptr, len);
2678  cfp->sp -= argc - 1;
2679  cfp->sp[-1] = argv_ary;
2680  calling->argc = 1;
2681  calling->heap_argv = argv_ary;
2682  RB_GC_GUARD(ary);
2683  }
2684  else {
2685  long i;
2686 
2687  if (max_args >= 0 && len + argc > max_args) {
2688  /* If only a given max_args is allowed, copy up to max args.
2689  * Used by vm_callee_setup_block_arg for non-lambda blocks,
2690  * where additional arguments are ignored.
2691  *
2692  * Also, copy up to one more argument than the maximum,
2693  * in case it is an empty keyword hash that will be removed.
2694  */
2695  calling->argc += len - (max_args - argc + 1);
2696  len = max_args - argc + 1;
2697  ret = true;
2698  }
2699  else {
2700  /* Unset heap_argv if set originally. Can happen when
2701  * forwarding modified arguments, where heap_argv was used
2702  * originally, but heap_argv is not supported by the forwarded
2703  * method in all cases.
2704  */
2705  calling->heap_argv = 0;
2706  }
2707  CHECK_VM_STACK_OVERFLOW(cfp, len);
2708 
2709  for (i = 0; i < len; i++) {
2710  *cfp->sp++ = ptr[i];
2711  }
2712  calling->argc += i;
2713  }
2714  }
2715 
2716  return ret;
2717 }
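/* Example: for f(*a) with a very long `a`, when the callee allows it
 * (max_args <= ALLOW_HEAP_ARGV) and len + argc exceeds VM_ARGC_STACK_MAX,
 * all arguments are collapsed into one hidden Array (calling->heap_argv)
 * and argc becomes 1, so e.g. f(*(1..1_000_000).to_a) cannot overflow the
 * VM stack. */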
2718 
2719 static inline void
2720 vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2721 {
2722  const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2723  const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2724  const VALUE h = rb_hash_new_with_size(kw_len);
2725  VALUE *sp = cfp->sp;
2726  int i;
2727 
2728  for (i=0; i<kw_len; i++) {
2729  rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2730  }
2731  (sp-kw_len)[0] = h;
2732 
2733  cfp->sp -= kw_len - 1;
2734  calling->argc -= kw_len - 1;
2735  calling->kw_splat = 1;
2736 }
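/* Worked example: f(k1: 1, k2: 2) arrives with kw_len == 2 values on the
 * stack; they are folded into a single hash {k1: 1, k2: 2} stored where the
 * first value was, so sp and argc each shrink by kw_len - 1 == 1 and
 * kw_splat is set. */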
2737 
2738 static inline VALUE
2739 vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2740 {
2741  if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2742  if (keyword_hash != Qnil) {
2743  /* Convert a non-hash keyword splat to a new hash */
2744  keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2745  }
2746  }
2747  else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2748  /* Convert a hash keyword splat to a new hash unless
2749  * a mutable keyword splat was passed.
2750  * Skip allocating new hash for empty keyword splat, as empty
2751  * keyword splat will be ignored by both callers.
2752  */
2753  keyword_hash = rb_hash_dup(keyword_hash);
2754  }
2755  return keyword_hash;
2756 }
2757 
2758 static inline void
2759 CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2760  struct rb_calling_info *restrict calling,
2761  const struct rb_callinfo *restrict ci, int max_args)
2762 {
2763  if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2764  if (IS_ARGS_KW_SPLAT(ci)) {
2765  // f(*a, **kw)
2766  VM_ASSERT(calling->kw_splat == 1);
2767 
2768  cfp->sp -= 2;
2769  calling->argc -= 2;
2770  VALUE ary = cfp->sp[0];
2771  VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2772 
2773  // splat a
2774  if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2775 
2776  // put kw
2777  if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2778  if (UNLIKELY(calling->heap_argv)) {
2779  rb_ary_push(calling->heap_argv, kwh);
2780  ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2781  if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2782  calling->kw_splat = 0;
2783  }
2784  }
2785  else {
2786  cfp->sp[0] = kwh;
2787  cfp->sp++;
2788  calling->argc++;
2789 
2790  VM_ASSERT(calling->kw_splat == 1);
2791  }
2792  }
2793  else {
2794  calling->kw_splat = 0;
2795  }
2796  }
2797  else {
2798  // f(*a)
2799  VM_ASSERT(calling->kw_splat == 0);
2800 
2801  cfp->sp -= 1;
2802  calling->argc -= 1;
2803  VALUE ary = cfp->sp[0];
2804 
2805  if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2806  goto check_keyword;
2807  }
2808 
2809  // check the last argument
2810  VALUE last_hash, argv_ary;
2811  if (UNLIKELY(argv_ary = calling->heap_argv)) {
2812  if (!IS_ARGS_KEYWORD(ci) &&
2813  RARRAY_LEN(argv_ary) > 0 &&
2814  RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2815  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2816 
2817  rb_ary_pop(argv_ary);
2818  if (!RHASH_EMPTY_P(last_hash)) {
2819  rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2820  calling->kw_splat = 1;
2821  }
2822  }
2823  }
2824  else {
2825 check_keyword:
2826  if (!IS_ARGS_KEYWORD(ci) &&
2827  calling->argc > 0 &&
2828  RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2829  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2830 
2831  if (RHASH_EMPTY_P(last_hash)) {
2832  calling->argc--;
2833  cfp->sp -= 1;
2834  }
2835  else {
2836  cfp->sp[-1] = rb_hash_dup(last_hash);
2837  calling->kw_splat = 1;
2838  }
2839  }
2840  }
2841  }
2842  }
2843  else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2844  // f(**kw)
2845  VM_ASSERT(calling->kw_splat == 1);
2846  VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2847 
2848  if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2849  cfp->sp--;
2850  calling->argc--;
2851  calling->kw_splat = 0;
2852  }
2853  else {
2854  cfp->sp[-1] = kwh;
2855  }
2856  }
2857  else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2858  // f(k1:1, k2:2)
2859  VM_ASSERT(calling->kw_splat == 0);
2860 
2861  /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2862  * by creating a keyword hash.
2863  * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2864  */
2865  vm_caller_setup_arg_kw(cfp, calling, ci);
2866  }
2867 }
2868 
2869 #define USE_OPT_HIST 0
2870 
2871 #if USE_OPT_HIST
2872 #define OPT_HIST_MAX 64
2873 static int opt_hist[OPT_HIST_MAX+1];
2874 
2875 __attribute__((destructor))
2876 static void
2877 opt_hist_show_results_at_exit(void)
2878 {
2879  for (int i=0; i<OPT_HIST_MAX; i++) {
2880  ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2881  }
2882 }
2883 #endif
2884 
2885 static VALUE
2886 vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2887  struct rb_calling_info *calling)
2888 {
2889  const struct rb_callcache *cc = calling->cc;
2890  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2891  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2892  const int opt = calling->argc - lead_num;
2893  const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2894  const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2895  const int param = ISEQ_BODY(iseq)->param.size;
2896  const int local = ISEQ_BODY(iseq)->local_table_size;
2897  const int delta = opt_num - opt;
2898 
2899  RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2900 
2901 #if USE_OPT_HIST
2902  if (opt_pc < OPT_HIST_MAX) {
2903  opt_hist[opt]++;
2904  }
2905  else {
2906  opt_hist[OPT_HIST_MAX]++;
2907  }
2908 #endif
2909 
2910  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2911 }
2912 
2913 static VALUE
2914 vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2915  struct rb_calling_info *calling)
2916 {
2917  const struct rb_callcache *cc = calling->cc;
2918  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2919  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2920  const int opt = calling->argc - lead_num;
2921  const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2922 
2923  RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2924 
2925 #if USE_OPT_HIST
2926  if (opt_pc < OPT_HIST_MAX) {
2927  opt_hist[opt]++;
2928  }
2929  else {
2930  opt_hist[OPT_HIST_MAX]++;
2931  }
2932 #endif
2933 
2934  return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2935 }
2936 
2937 static void
2938 args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2939  VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2940  VALUE *const locals);
2941 
2942 static VALUE
2943 vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2944  struct rb_calling_info *calling)
2945 {
2946  const struct rb_callcache *cc = calling->cc;
2947  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2948  int param_size = ISEQ_BODY(iseq)->param.size;
2949  int local_size = ISEQ_BODY(iseq)->local_table_size;
2950 
2951  // Setting up local size and param size
2952  VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2953 
2954  local_size = local_size + vm_ci_argc(calling->cd->ci);
2955  param_size = param_size + vm_ci_argc(calling->cd->ci);
2956 
2957  cfp->sp[0] = (VALUE)calling->cd->ci;
2958 
2959  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2960 }
2961 
2962 static VALUE
2963 vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2964  struct rb_calling_info *calling)
2965 {
2966  const struct rb_callinfo *ci = calling->cd->ci;
2967  const struct rb_callcache *cc = calling->cc;
2968 
2969  VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2970  RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2971 
2972  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2973  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2974  const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2975  const int ci_kw_len = kw_arg->keyword_len;
2976  const VALUE * const ci_keywords = kw_arg->keywords;
2977  VALUE *argv = cfp->sp - calling->argc;
2978  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2979  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2980  VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2981  MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2982  args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2983 
2984  int param = ISEQ_BODY(iseq)->param.size;
2985  int local = ISEQ_BODY(iseq)->local_table_size;
2986  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2987 }
2988 
2989 static VALUE
2990 vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2991  struct rb_calling_info *calling)
2992 {
2993  const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2994  const struct rb_callcache *cc = calling->cc;
2995 
2996  VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2997  RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2998 
2999  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3000  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3001  VALUE * const argv = cfp->sp - calling->argc;
3002  VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3003 
3004  int i;
3005  for (i=0; i<kw_param->num; i++) {
3006  klocals[i] = kw_param->default_values[i];
3007  }
3008  klocals[i] = INT2FIX(0); // kw specify flag
3009  // NOTE:
3010  // nobody checks this value, but it should be cleared because it can
3011  // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3012 
3013  int param = ISEQ_BODY(iseq)->param.size;
3014  int local = ISEQ_BODY(iseq)->local_table_size;
3015  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3016 }
3017 
3018 static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3019 
3020 static VALUE
3021 vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3022  struct rb_calling_info *calling)
3023 {
3024  const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3025  cfp->sp -= (calling->argc + 1);
3026  rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3027  return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3028 }
3029 
3030 VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3031 
3032 static void
3033 warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3034 {
3035  rb_vm_t *vm = GET_VM();
3036  st_table *dup_check_table = vm->unused_block_warning_table;
3037  st_data_t key;
3038  bool strict_unused_block = rb_warning_category_enabled_p(RB_WARN_CATEGORY_STRICT_UNUSED_BLOCK);
3039 
3040  union {
3041  VALUE v;
3042  unsigned char b[SIZEOF_VALUE];
3043  } k1 = {
3044  .v = (VALUE)pc,
3045  }, k2 = {
3046  .v = (VALUE)cme->def,
3047  };
3048 
3049  // relaxed check: deduplicate warnings by method id only
3050  if (!strict_unused_block) {
3051  key = (st_data_t)cme->def->original_id;
3052 
3053  if (st_lookup(dup_check_table, key, NULL)) {
3054  return;
3055  }
3056  }
3057 
3058  // strict check
3059  // make unique key from pc and me->def pointer
3060  key = 0;
3061  for (int i=0; i<SIZEOF_VALUE; i++) {
3062  // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3063  key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3064  }
3065 
3066  if (0) {
3067  fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3068  fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3069  fprintf(stderr, "key:%p\n", (void *)key);
3070  }
3071 
3072  // duplication check
3073  if (st_insert(dup_check_table, key, 1)) {
3074  // already shown
3075  }
3076  else if (RTEST(ruby_verbose) || strict_unused_block) {
3077  VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3078  VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3079 
3080  if (!NIL_P(m_loc)) {
3081  rb_warn("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3082  name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3083  }
3084  else {
3085  rb_warn("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3086  }
3087  }
3088 }
3089 
3090 static inline int
3091 vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3092  const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3093 {
3094  const struct rb_callinfo *ci = calling->cd->ci;
3095  const struct rb_callcache *cc = calling->cc;
3096 
3097  VM_ASSERT((vm_ci_argc(ci), 1));
3098  VM_ASSERT(vm_cc_cme(cc) != NULL);
3099 
3100  if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3101  calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3102  !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3103  warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3104  }
3105 
3106  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3107  if (LIKELY(rb_simple_iseq_p(iseq))) {
3108  rb_control_frame_t *cfp = ec->cfp;
3109  int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3110  CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3111 
3112  if (calling->argc != lead_num) {
3113  argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3114  }
3115 
3116  //VM_ASSERT(ci == calling->cd->ci);
3117  VM_ASSERT(cc == calling->cc);
3118 
3119  if (vm_call_iseq_optimizable_p(ci, cc)) {
3120  if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3121  !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3122  VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3123  vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3124  CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3125  }
3126  else {
3127  CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3128  }
3129  }
3130  return 0;
3131  }
3132  else if (rb_iseq_only_optparam_p(iseq)) {
3133  rb_control_frame_t *cfp = ec->cfp;
3134 
3135  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3136  const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3137 
3138  CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3139  const int argc = calling->argc;
3140  const int opt = argc - lead_num;
3141 
3142  if (opt < 0 || opt > opt_num) {
3143  argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3144  }
3145 
3146  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3147  CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3148  !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3149  vm_call_cacheable(ci, cc));
3150  }
3151  else {
3152  CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3153  !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3154  vm_call_cacheable(ci, cc));
3155  }
3156 
3157  /* initialize opt vars for self-references */
3158  VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3159  for (int i=argc; i<lead_num + opt_num; i++) {
3160  argv[i] = Qnil;
3161  }
3162  return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3163  }
3164  else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3165  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3166  const int argc = calling->argc;
3167  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3168 
3169  if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3170  const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3171 
3172  if (argc - kw_arg->keyword_len == lead_num) {
3173  const int ci_kw_len = kw_arg->keyword_len;
3174  const VALUE * const ci_keywords = kw_arg->keywords;
3175  VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3176  MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3177 
3178  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3179  args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3180 
3181  CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3182  vm_call_cacheable(ci, cc));
3183 
3184  return 0;
3185  }
3186  }
3187  else if (argc == lead_num) {
3188  /* no kwarg */
3189  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3190  args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3191 
3192  if (klocals[kw_param->num] == INT2FIX(0)) {
3193  /* copy from default_values */
3194  CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3195  vm_call_cacheable(ci, cc));
3196  }
3197 
3198  return 0;
3199  }
3200  }
3201  }
3202 
3203  // Called iseq is using ... param
3204  // def foo(...) # <- iseq for foo will have "forwardable"
3205  //
3206  // We want to set the `...` local to the caller's CI
3207  // foo(1, 2) # <- the ci for this should end up as `...`
3208  //
3209  // So hopefully the stack looks like:
3210  //
3211  // => 1
3212  // => 2
3213  // => *
3214  // => **
3215  // => &
3216  // => ... # <- points at `foo`s CI
3217  // => cref_or_me
3218  // => specval
3219  // => type
3220  //
3221  if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3222  bool can_fastpath = true;
3223 
3224  if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3225  struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3226  if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3227  ci = vm_ci_new_runtime(
3228  vm_ci_mid(ci),
3229  vm_ci_flag(ci),
3230  vm_ci_argc(ci),
3231  vm_ci_kwarg(ci));
3232  } else {
3233  ci = forward_cd->caller_ci;
3234  }
3235  can_fastpath = false;
3236  }
3237  // C functions calling iseqs will stack-allocate a CI,
3238  // so we need to convert it to a heap-allocated one
3239  if (!vm_ci_markable(ci)) {
3240  ci = vm_ci_new_runtime(
3241  vm_ci_mid(ci),
3242  vm_ci_flag(ci),
3243  vm_ci_argc(ci),
3244  vm_ci_kwarg(ci));
3245  can_fastpath = false;
3246  }
3247  argv[param_size - 1] = (VALUE)ci;
3248  CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3249  return 0;
3250  }
3251 
3252  return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3253 }
3254 
3255 static void
3256 vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3257 {
3258  // This case is when the caller is using a ... parameter.
3259  // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3260  // In this case the caller's caller's CI will be on the stack.
3261  //
3262  // For example:
3263  //
3264  // def bar(a, b); a + b; end
3265  // def foo(...); bar(...); end
3266  // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3267  //
3268  // Stack layout will be:
3269  //
3270  // > 1
3271  // > 2
3272  // > CI for foo(1, 2)
3273  // > cref_or_me
3274  // > specval
3275  // > type
3276  // > receiver
3277  // > CI for foo(1, 2), via `getlocal ...`
3278  // > ( SP points here )
3279  const VALUE * lep = VM_CF_LEP(cfp);
3280 
3281  const rb_iseq_t *iseq;
3282 
3283  // If we're in an escaped environment (lambda for example), get the iseq
3284  // from the captured env.
3285  if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3286  rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3287  iseq = env->iseq;
3288  }
3289  else { // Otherwise use the lep to find the caller
3290  iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3291  }
3292 
3293  // Our local storage is below the args we need to copy
3294  int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3295 
3296  const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3297  VALUE * to = cfp->sp - 1; // clobber the CI
3298 
3299  if (RTEST(splat)) {
3300  to -= 1; // clobber the splat array
3301  CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3302  MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3303  to += RARRAY_LEN(splat);
3304  }
3305 
3306  CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3307  MEMCPY(to, from, VALUE, argc);
3308  cfp->sp = to + argc;
3309 
3310  // Stack layout should now be:
3311  //
3312  // > 1
3313  // > 2
3314  // > CI for foo(1, 2)
3315  // > cref_or_me
3316  // > specval
3317  // > type
3318  // > receiver
3319  // > 1
3320  // > 2
3321  // > ( SP points here )
3322 }
3323 
3324 static VALUE
3325 vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3326 {
3327  RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3328 
3329  const struct rb_callcache *cc = calling->cc;
3330  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3331  int param_size = ISEQ_BODY(iseq)->param.size;
3332  int local_size = ISEQ_BODY(iseq)->local_table_size;
3333 
3334  RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3335 
3336  const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3337  return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3338 }
3339 
3340 static VALUE
3341 vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3342 {
3343  RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3344 
3345  const struct rb_callcache *cc = calling->cc;
3346  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3347  int param_size = ISEQ_BODY(iseq)->param.size;
3348  int local_size = ISEQ_BODY(iseq)->local_table_size;
3349 
3350  RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3351 
3352  // Setting up local size and param size
3353  local_size = local_size + vm_ci_argc(calling->cd->ci);
3354  param_size = param_size + vm_ci_argc(calling->cd->ci);
3355 
3356  const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3357  return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3358 }
3359 
3360 static inline VALUE
3361 vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3362  int opt_pc, int param_size, int local_size)
3363 {
3364  const struct rb_callinfo *ci = calling->cd->ci;
3365  const struct rb_callcache *cc = calling->cc;
3366 
3367  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3368  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3369  }
3370  else {
3371  return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3372  }
3373 }
3374 
3375 static inline VALUE
3376 vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3377  int opt_pc, int param_size, int local_size)
3378 {
3379  const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3380  VALUE *argv = cfp->sp - calling->argc;
3381  VALUE *sp = argv + param_size;
3382  cfp->sp = argv - 1 /* recv */;
3383 
3384  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3385  calling->block_handler, (VALUE)me,
3386  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3387  local_size - param_size,
3388  ISEQ_BODY(iseq)->stack_max);
3389  return Qundef;
3390 }
3391 
3392 static inline VALUE
3393 vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3394 {
3395  const struct rb_callcache *cc = calling->cc;
3396  unsigned int i;
3397  VALUE *argv = cfp->sp - calling->argc;
3398  const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3399  const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3400  VALUE *src_argv = argv;
3401  VALUE *sp_orig, *sp;
3402  VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3403 
3404  if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3405  struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3406  const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3407  dst_captured->code.val = src_captured->code.val;
3408  if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3409  calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3410  }
3411  else {
3412  calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3413  }
3414  }
3415 
3416  vm_pop_frame(ec, cfp, cfp->ep);
3417  cfp = ec->cfp;
3418 
3419  sp_orig = sp = cfp->sp;
3420 
3421  /* push self */
3422  sp[0] = calling->recv;
3423  sp++;
3424 
3425  /* copy arguments */
3426  for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3427  *sp++ = src_argv[i];
3428  }
3429 
3430  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3431  calling->recv, calling->block_handler, (VALUE)me,
3432  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3433  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3434  ISEQ_BODY(iseq)->stack_max);
3435 
3436  cfp->sp = sp_orig;
3437 
3438  return Qundef;
3439 }
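/* A tailcall pops the caller's frame first (vm_pop_frame above) and pushes
 * the callee over the same stack region, so, when compiled with the
 * tailcall_optimization compile option (off by default), self-recursion
 * such as
 *
 *   def countdown(n) = n.zero? ? 0 : countdown(n - 1)
 *
 * runs in constant VM stack space. This is a sketch of the intent, not a
 * guarantee for every call shape. */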
3440 
3441 static void
3442 ractor_unsafe_check(void)
3443 {
3444  if (!rb_ractor_main_p()) {
3445  rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3446  }
3447 }
3448 
3449 static VALUE
3450 call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3451 {
3452  ractor_unsafe_check();
3453  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3454  return (*f)(recv, rb_ary_new4(argc, argv));
3455 }
3456 
3457 static VALUE
3458 call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3459 {
3460  ractor_unsafe_check();
3461  VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3462  return (*f)(argc, argv, recv);
3463 }
3464 
3465 static VALUE
3466 call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3467 {
3468  ractor_unsafe_check();
3469  VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3470  return (*f)(recv);
3471 }
3472 
3473 static VALUE
3474 call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3475 {
3476  ractor_unsafe_check();
3477  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3478  return (*f)(recv, argv[0]);
3479 }
3480 
3481 static VALUE
3482 call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3483 {
3484  ractor_unsafe_check();
3485  VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3486  return (*f)(recv, argv[0], argv[1]);
3487 }
3488 
3489 static VALUE
3490 call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3491 {
3492  ractor_unsafe_check();
3493  VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3494  return (*f)(recv, argv[0], argv[1], argv[2]);
3495 }
3496 
3497 static VALUE
3498 call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3499 {
3500  ractor_unsafe_check();
3501  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3502  return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3503 }
3504 
3505 static VALUE
3506 call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3507 {
3508  ractor_unsafe_check();
3509  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3510  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3511 }
3512 
3513 static VALUE
3514 call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3515 {
3516  ractor_unsafe_check();
3517  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3518  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3519 }
3520 
3521 static VALUE
3522 call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3523 {
3524  ractor_unsafe_check();
3525  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3526  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3527 }
3528 
3529 static VALUE
3530 call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3531 {
3532  ractor_unsafe_check();
3533  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3534  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3535 }
3536 
3537 static VALUE
3538 call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3539 {
3540  ractor_unsafe_check();
3541  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3542  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3543 }
3544 
3545 static VALUE
3546 call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3547 {
3548  ractor_unsafe_check();
3549  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3550  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3551 }
3552 
3553 static VALUE
3554 call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3555 {
3556  ractor_unsafe_check();
3557  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3558  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3559 }
3560 
3561 static VALUE
3562 call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3563 {
3564  ractor_unsafe_check();
3565  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3566  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3567 }
3568 
3569 static VALUE
3570 call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3571 {
3572  ractor_unsafe_check();
3573  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3574  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3575 }
3576 
3577 static VALUE
3578 call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3579 {
3580  ractor_unsafe_check();
3581  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3582  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3583 }
3584 
3585 static VALUE
3586 call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3587 {
3588  ractor_unsafe_check();
3589  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3590  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3591 }
3592 
3593 static VALUE
3594 ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3595 {
3596  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3597  return (*f)(recv, rb_ary_new4(argc, argv));
3598 }
3599 
3600 static VALUE
3601 ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3602 {
3603  VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3604  return (*f)(argc, argv, recv);
3605 }
3606 
3607 static VALUE
3608 ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3609 {
3610  VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3611  return (*f)(recv);
3612 }
3613 
3614 static VALUE
3615 ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3616 {
3617  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3618  return (*f)(recv, argv[0]);
3619 }
3620 
3621 static VALUE
3622 ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3623 {
3624  VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3625  return (*f)(recv, argv[0], argv[1]);
3626 }
3627 
3628 static VALUE
3629 ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3630 {
3631  VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3632  return (*f)(recv, argv[0], argv[1], argv[2]);
3633 }
3634 
3635 static VALUE
3636 ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3637 {
3638  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3639  return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3640 }
3641 
3642 static VALUE
3643 ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3644 {
3645  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3646  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3647 }
3648 
3649 static VALUE
3650 ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3651 {
3652  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3653  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3654 }
3655 
3656 static VALUE
3657 ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3658 {
3659  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3660  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3661 }
3662 
3663 static VALUE
3664 ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3665 {
3666  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3667  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3668 }
3669 
3670 static VALUE
3671 ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3672 {
3673  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3674  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3675 }
3676 
3677 static VALUE
3678 ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3679 {
3680  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3681  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3682 }
3683 
3684 static VALUE
3685 ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3686 {
3687  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3688  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3689 }
3690 
3691 static VALUE
3692 ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3693 {
3694  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3695  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3696 }
3697 
3698 static VALUE
3699 ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3700 {
3701  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3702  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3703 }
3704 
3705 static VALUE
3706 ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3707 {
3708  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3709  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3710 }
3711 
3712 static VALUE
3713 ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3714 {
3715  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3716  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3717 }
3718 
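/* The ractor_safe_call_cfunc_* family above adapts the single generic invoker
 * signature (recv, argc, argv, func) to C functions of each supported arity.
 * The invoker is chosen when the method is defined, from the arity passed to
 * rb_define_method(), so each call is one indirect jump. For example
 * (illustrative names), a C method registered as
 *
 *   static VALUE plus(VALUE self, VALUE other);  // arity 1
 *   rb_define_method(klass, "plus", plus, 1);
 *
 * is dispatched through ractor_safe_call_cfunc_1, while arity -1 (argc/argv
 * style) uses the _m1 adapter and arity -2 (arguments collected into a Ruby
 * array) uses _m2. */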
3719 static inline int
3720 vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3721 {
3722  const int ov_flags = RAISED_STACKOVERFLOW;
3723  if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3724  if (rb_ec_raised_p(ec, ov_flags)) {
3725  rb_ec_raised_reset(ec, ov_flags);
3726  return TRUE;
3727  }
3728  return FALSE;
3729 }
3730 
3731 #define CHECK_CFP_CONSISTENCY(func) \
3732  (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3733  rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3734 
3735 static inline
3736 const rb_method_cfunc_t *
3737 vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3738 {
3739 #if VM_DEBUG_VERIFY_METHOD_CACHE
3740  switch (me->def->type) {
3741  case VM_METHOD_TYPE_CFUNC:
3742  case VM_METHOD_TYPE_NOTIMPLEMENTED:
3743  break;
3744 # define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3745  METHOD_BUG(ISEQ);
3746  METHOD_BUG(ATTRSET);
3747  METHOD_BUG(IVAR);
3748  METHOD_BUG(BMETHOD);
3749  METHOD_BUG(ZSUPER);
3750  METHOD_BUG(UNDEF);
3751  METHOD_BUG(OPTIMIZED);
3752  METHOD_BUG(MISSING);
3753  METHOD_BUG(REFINED);
3754  METHOD_BUG(ALIAS);
3755 # undef METHOD_BUG
3756  default:
3757  rb_bug("wrong method type: %d", me->def->type);
3758  }
3759 #endif
3760  return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3761 }
3762 
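/* Invokes a C-implemented method with a full CFUNC control frame: fire the
 * C_CALL event, push a VM_FRAME_MAGIC_CFUNC frame, check the arity of
 * fixed-arity methods, call the registered invoker, verify that the callee
 * left the frame stack consistent (CHECK_CFP_CONSISTENCY), pop the frame, and
 * fire the C_RETURN event. stack_bottom points at the receiver slot; sp is
 * lowered to it before the call, so the arguments are no longer treated as
 * live VM stack contents while the C function runs. */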
3763 static VALUE
3764 vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3765  int argc, VALUE *argv, VALUE *stack_bottom)
3766 {
3767  RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3768  const struct rb_callinfo *ci = calling->cd->ci;
3769  const struct rb_callcache *cc = calling->cc;
3770  VALUE val;
3771  const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3772  const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3773 
3774  VALUE recv = calling->recv;
3775  VALUE block_handler = calling->block_handler;
3776  VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3777 
3778  if (UNLIKELY(calling->kw_splat)) {
3779  frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3780  }
3781 
3782  VM_ASSERT(reg_cfp == ec->cfp);
3783 
3784  RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3785  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3786 
3787  vm_push_frame(ec, NULL, frame_type, recv,
3788  block_handler, (VALUE)me,
3789  0, ec->cfp->sp, 0, 0);
3790 
3791  int len = cfunc->argc;
3792  if (len >= 0) rb_check_arity(argc, len, len);
3793 
3794  reg_cfp->sp = stack_bottom;
3795  val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3796 
3797  CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3798 
3799  rb_vm_pop_frame(ec);
3800 
3801  VM_ASSERT(ec->cfp->sp == stack_bottom);
3802 
3803  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3804  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3805 
3806  return val;
3807 }
3808 
3809 // Push a C method frame for a given cme. This is called when JIT code skipped
3810 // pushing a frame but the C method reached a point where a frame is needed.
3811 void
3812 rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3813 {
3814  VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3815  rb_execution_context_t *ec = GET_EC();
3816  VALUE *sp = ec->cfp->sp;
3817  VALUE recv = *(sp - recv_idx - 1);
3818  VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3819  VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3820 #if VM_CHECK_MODE > 0
3821  // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3822  *(GET_EC()->cfp->sp) = Qfalse;
3823 #endif
3824  vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3825 }
3826 
3827 // If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3828 bool
3829 rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3830 {
3831  return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3832 }
3833 
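/* Common case: the arguments are already laid out contiguously on the VM
 * stack, with the receiver at sp[-argc - 1] and the argc arguments directly
 * above it, so stack_bottom is the receiver slot and argv = stack_bottom + 1. */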
3834 static VALUE
3835 vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3836 {
3837  int argc = calling->argc;
3838  VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3839  VALUE *argv = &stack_bottom[1];
3840 
3841  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3842 }
3843 
3844 static VALUE
3845 vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3846 {
3847  const struct rb_callinfo *ci = calling->cd->ci;
3848  RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3849 
3850  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3851  VALUE argv_ary;
3852  if (UNLIKELY(argv_ary = calling->heap_argv)) {
3853  VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3854  int argc = RARRAY_LENINT(argv_ary);
3855  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3856  VALUE *stack_bottom = reg_cfp->sp - 2;
3857 
3858  VM_ASSERT(calling->argc == 1);
3859  VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3860  VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3861 
3862  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3863  }
3864  else {
3865  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3866 
3867  return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3868  }
3869 }
3870 
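/* Copies the contents of a splatted argument array onto the VM stack so the
 * ordinary with-frame path can be used. Only arrays small enough to fit
 * (argc <= VM_ARGC_STACK_MAX) are expanded; larger ones fall back to
 * vm_call_cfunc_other, which can pass the arguments as a heap array. */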
3871 static inline VALUE
3872 vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3873 {
3874  VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3875  int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3876 
3877  if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3878  return vm_call_cfunc_other(ec, reg_cfp, calling);
3879  }
3880 
3881  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3882  calling->kw_splat = 0;
3883  int i;
3884  VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3885  VALUE *sp = stack_bottom;
3886  CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3887  for (i = 0; i < argc; i++) {
3888  *++sp = argv[i];
3889  }
3890  reg_cfp->sp = sp+1;
3891 
3892  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3893 }
3894 
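/* Fast path for calls of the exact shape `f(*a)`. A non-empty trailing hash
 * flagged with RHASH_PASS_AS_KEYWORDS (produced by a `**kw` splat) would need
 * real keyword handling and falls back to the generic path; an empty one can
 * simply be dropped by bumping argc_offset. */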
3895 static inline VALUE
3896 vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3897 {
3898  RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3899  VALUE argv_ary = reg_cfp->sp[-1];
3900  int argc = RARRAY_LENINT(argv_ary);
3901  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3902  VALUE last_hash;
3903  int argc_offset = 0;
3904 
3905  if (UNLIKELY(argc > 0 &&
3906  RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3907  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3908  if (!RHASH_EMPTY_P(last_hash)) {
3909  return vm_call_cfunc_other(ec, reg_cfp, calling);
3910  }
3911  argc_offset++;
3912  }
3913  return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3914 }
3915 
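/* Fast path for calls of the exact shape `f(*a, **kw)` where the keyword
 * splat turns out to be nil or an empty hash: the keyword argument is
 * discarded and the call reduces to the plain array-splat case above. */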
3916 static inline VALUE
3917 vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3918 {
3919  RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3920  VALUE keyword_hash = reg_cfp->sp[-1];
3921 
3922  if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3923  return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3924  }
3925 
3926  return vm_call_cfunc_other(ec, reg_cfp, calling);
3927 }
3928 
3929 static VALUE
3930 vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3931 {
3932  const struct rb_callinfo *ci = calling->cd->ci;
3933  RB_DEBUG_COUNTER_INC(ccf_cfunc);
3934 
3935  if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3936  if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3937  // f(*a)
3938  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3939  return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3940  }
3941  if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3942  // f(*a, **kw)
3943  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3944  return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3945  }
3946  }
3947 
3948  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3949  return vm_call_cfunc_other(ec, reg_cfp, calling);
3950 }
3951 
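/* Dispatch target for attr_reader-style methods (VM_METHOD_TYPE_IVAR): pop
 * the receiver and read the instance variable through the inline cache in the
 * call cache. E.g. with `attr_reader :x`, a call to `obj.x` lands in
 * vm_getivar() without a method frame ever being pushed. */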
3952 static VALUE
3953 vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3954 {
3955  const struct rb_callcache *cc = calling->cc;
3956  RB_DEBUG_COUNTER_INC(ccf_ivar);
3957  cfp->sp -= 1;
3958  VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3959  return ivar;
3960 }
3961 
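/* Dispatch target for attr_writer-style methods (VM_METHOD_TYPE_ATTRSET):
 * take the assigned value from the stack top, pop value and receiver, and
 * store the ivar using the shape id and attribute index cached in the call
 * cache. vm_setivar() returns Qundef on a cache miss; receivers other than
 * T_OBJECT/T_CLASS/T_MODULE then try the generic vm_setivar_default() before
 * resorting to the slow path. */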
3962 static VALUE
3963 vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3964 {
3965  RB_DEBUG_COUNTER_INC(ccf_attrset);
3966  VALUE val = *(cfp->sp - 1);
3967  cfp->sp -= 2;
3968  attr_index_t index = vm_cc_attr_index(cc);
3969  shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3970  ID id = vm_cc_cme(cc)->def->body.attr.id;
3971  rb_check_frozen(obj);
3972  VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3973  if (UNDEF_P(res)) {
3974  switch (BUILTIN_TYPE(obj)) {
3975  case T_OBJECT:
3976  case T_CLASS:
3977  case T_MODULE:
3978  break;
3979  default:
3980  {
3981  res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3982  if (!UNDEF_P(res)) {
3983  return res;
3984  }
3985  }
3986  }
3987  res = vm_setivar_slowpath_attr(obj, id, val, cc);
3988  }
3989  return res;
3990 }
3991 
3992 static VALUE
3993 vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3994 {
3995  return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3996 }
3997 
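/* bmethods are methods whose body is a Proc, e.g. `define_method(:foo) { ... }`.
 * A bmethod whose Proc is not Ractor-shareable may only be invoked from the
 * Ractor it was defined in, hence the defined_ractor check before running the
 * block. */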
3998 static inline VALUE
3999 vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
4000 {
4001  rb_proc_t *proc;
4002  VALUE val;
4003  const struct rb_callcache *cc = calling->cc;
4004  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4005  VALUE procv = cme->def->body.bmethod.proc;
4006 
4007  if (!RB_OBJ_SHAREABLE_P(procv) &&
4008  cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4009  rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4010  }
4011 
4012  /* control block frame */
4013  GetProcPtr(procv, proc);
4014  val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4015 
4016  return val;
4017 }
4018 
4019 static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4020 
4021 static VALUE
4022 vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4023 {
4024  RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4025 
4026  const struct rb_callcache *cc = calling->cc;
4027  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4028  VALUE procv = cme->def->body.bmethod.proc;
4029 
4030  if (!RB_OBJ_SHAREABLE_P(procv) &&
4031  cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4032  rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4033  }
4034 
4035  rb_proc_t *proc;
4036  GetProcPtr(procv, proc);
4037  const struct rb_block *block = &proc->block;
4038 
4039  while (vm_block_type(block) == block_type_proc) {
4040  block = vm_proc_block(block->as.proc);
4041  }
4042  VM_ASSERT(vm_block_type(block) == block_type_iseq);
4043 
4044  const struct rb_captured_block *captured = &block->as.captured;
4045  const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4046  VALUE * const argv = cfp->sp - calling->argc;
4047  const int arg_size = ISEQ_BODY(iseq)->param.size;
4048 
4049  int opt_pc;
4050  if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4051  opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4052  }
4053  else {
4054  opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4055  }
4056 
4057  cfp->sp = argv - 1; // -1 for the receiver
4058 
4059  vm_push_frame(ec, iseq,
4060  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4061  calling->recv,
4062  VM_GUARDED_PREV_EP(captured->ep),
4063  (VALUE)cme,
4064  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4065  argv + arg_size,
4066  ISEQ_BODY(iseq)->local_table_size - arg_size,
4067  ISEQ_BODY(iseq)->stack_max);
4068 
4069  return Qundef;
4070 }
4071 
4072 static VALUE
4073 vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4074 {
4075  RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4076 
4077  VALUE *argv;
4078  int argc;
4079  CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4080  if (UNLIKELY(calling->heap_argv)) {
4081  argv = RARRAY_PTR(calling->heap_argv);
4082  cfp->sp -= 2;
4083  }
4084  else {
4085  argc = calling->argc;
4086  argv = ALLOCA_N(VALUE, argc);
4087  MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4088  cfp->sp += - argc - 1;
4089  }
4090 
4091  return vm_call_bmethod_body(ec, calling, argv);
4092 }
4093 
4094 static VALUE
4095 vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4096 {
4097  RB_DEBUG_COUNTER_INC(ccf_bmethod);
4098 
4099  const struct rb_callcache *cc = calling->cc;
4100  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4101  VALUE procv = cme->def->body.bmethod.proc;
4102  rb_proc_t *proc;
4103  GetProcPtr(procv, proc);
4104  const struct rb_block *block = &proc->block;
4105 
4106  while (vm_block_type(block) == block_type_proc) {
4107  block = vm_proc_block(block->as.proc);
4108  }
4109  if (vm_block_type(block) == block_type_iseq) {
4110  CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4111  return vm_call_iseq_bmethod(ec, cfp, calling);
4112  }
4113 
4114  CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4115  return vm_call_noniseq_bmethod(ec, cfp, calling);
4116 }
4117 
4118 VALUE
4119 rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4120 {
4121  VALUE klass = current_class;
4122 
4123  /* for a prepended Module, start the search from the covering class */
4124  if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
4125  RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4126  klass = RBASIC_CLASS(klass);
4127  }
4128 
4129  while (RTEST(klass)) {
4130  VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4131  if (owner == target_owner) {
4132  return klass;
4133  }
4134  klass = RCLASS_SUPER(klass);
4135  }
4136 
4137  return current_class; /* possibly a module function */
4138 }
4139 
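/* Resolves the original method entry behind an alias, e.g. one created with
 * `alias_method :new_name, :old_name`. When the original was defined in a
 * module its entry carries no defined_class, so a "complemented" callable
 * entry is created and, when it is safe to do so (reference_count == 1),
 * cached back into the alias definition. */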
4140 static const rb_callable_method_entry_t *
4141 aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4142 {
4143  const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4144  const rb_callable_method_entry_t *cme;
4145 
4146  if (orig_me->defined_class == 0) {
4147  VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4148  VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4149  cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4150 
4151  if (me->def->reference_count == 1) {
4152  RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4153  }
4154  else {
4155  rb_method_definition_t *def =
4156  rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4157  rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4158  }
4159  }
4160  else {
4161  cme = (const rb_callable_method_entry_t *)orig_me;
4162  }
4163 
4164  VM_ASSERT(callable_method_entry_p(cme));
4165  return cme;
4166 }
4167 
4168 const rb_callable_method_entry_t *
4169 rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4170 {
4171  return aliased_callable_method_entry(me);
4172 }
4173 
4174 static VALUE
4175 vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4176 {
4177  calling->cc = &VM_CC_ON_STACK(Qundef,
4178  vm_call_general,
4179  {{0}},
4180  aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4181 
4182  return vm_call_method_each_type(ec, cfp, calling);
4183 }
4184 
4185 static enum method_missing_reason
4186 ci_missing_reason(const struct rb_callinfo *ci)
4187 {
4188  enum method_missing_reason stat = MISSING_NOENTRY;
4189  if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4190  if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4191  if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4192  return stat;
4193 }
4194 
4195 static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4196 
4197 static VALUE
4198 vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4199  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4200 {
4201  ASSUME(calling->argc >= 0);
4202 
4203  enum method_missing_reason missing_reason = MISSING_NOENTRY;
4204  int argc = calling->argc;
4205  VALUE recv = calling->recv;
4206  VALUE klass = CLASS_OF(recv);
4207  ID mid = rb_check_id(&symbol);
4208  flags |= VM_CALL_OPT_SEND;
4209 
4210  if (UNLIKELY(! mid)) {
4211  mid = idMethodMissing;
4212  missing_reason = ci_missing_reason(ci);
4213  ec->method_missing_reason = missing_reason;
4214 
4215  VALUE argv_ary;
4216  if (UNLIKELY(argv_ary = calling->heap_argv)) {
4217  if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4218  rb_ary_unshift(argv_ary, symbol);
4219 
4220  /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4221  int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4222  VALUE exc = rb_make_no_method_exception(
4223  rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4224 
4225  rb_exc_raise(exc);
4226  }
4227  rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4228  }
4229  else {
4230  /* E.g. when argc == 2
4231  *
4232  * | | | | TOPN
4233  * | | +------+
4234  * | | +---> | arg1 | 0
4235  * +------+ | +------+
4236  * | arg1 | -+ +-> | arg0 | 1
4237  * +------+ | +------+
4238  * | arg0 | ---+ | sym | 2
4239  * +------+ +------+
4240  * | recv | | recv | 3
4241  * --+------+--------+------+------
4242  */
4243  int i = argc;
4244  CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4245  INC_SP(1);
4246  MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4247  argc = ++calling->argc;
4248 
4249  if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4250  /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4251  TOPN(i) = symbol;
4252  int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4253  const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4254  VALUE exc = rb_make_no_method_exception(
4255  rb_eNoMethodError, 0, recv, argc, argv, priv);
4256 
4257  rb_exc_raise(exc);
4258  }
4259  else {
4260  TOPN(i) = rb_str_intern(symbol);
4261  }
4262  }
4263  }
4264 
4265  struct rb_forwarding_call_data new_fcd = {
4266  .cd = {
4267  .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4268  .cc = NULL,
4269  },
4270  .caller_ci = NULL,
4271  };
4272 
4273  if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4274  calling->cd = &new_fcd.cd;
4275  }
4276  else {
4277  const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4278  VM_ASSERT((vm_ci_argc(caller_ci), 1));
4279  new_fcd.caller_ci = caller_ci;
4280  calling->cd = (struct rb_call_data *)&new_fcd;
4281  }
4282  calling->cc = &VM_CC_ON_STACK(klass,
4283  vm_call_general,
4284  { .method_missing_reason = missing_reason },
4285  rb_callable_method_entry_with_refinements(klass, mid, NULL));
4286 
4287  if (flags & VM_CALL_FCALL) {
4288  return vm_call_method(ec, reg_cfp, calling);
4289  }
4290 
4291  const struct rb_callcache *cc = calling->cc;
4292  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4293 
4294  if (vm_cc_cme(cc) != NULL) {
4295  switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4296  case METHOD_VISI_PUBLIC: /* likely */
4297  return vm_call_method_each_type(ec, reg_cfp, calling);
4298  case METHOD_VISI_PRIVATE:
4299  vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4300  break;
4301  case METHOD_VISI_PROTECTED:
4302  vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4303  break;
4304  default:
4305  VM_UNREACHABLE(vm_call_method);
4306  }
4307  return vm_call_method_missing(ec, reg_cfp, calling);
4308  }
4309 
4310  return vm_call_method_nome(ec, reg_cfp, calling);
4311 }
4312 
4313 static VALUE
4314 vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4315 {
4316  const struct rb_callinfo *ci = calling->cd->ci;
4317  int i;
4318  VALUE sym;
4319 
4320  i = calling->argc - 1;
4321 
4322  if (calling->argc == 0) {
4323  rb_raise(rb_eArgError, "no method name given");
4324  }
4325 
4326  sym = TOPN(i);
4327  /* E.g. when i == 2
4328  *
4329  * | | | | TOPN
4330  * +------+ | |
4331  * | arg1 | ---+ | | 0
4332  * +------+ | +------+
4333  * | arg0 | -+ +-> | arg1 | 1
4334  * +------+ | +------+
4335  * | sym | +---> | arg0 | 2
4336  * +------+ +------+
4337  * | recv | | recv | 3
4338  * --+------+--------+------+------
4339  */
4340  /* shift arguments */
4341  if (i > 0) {
4342  MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4343  }
4344  calling->argc -= 1;
4345  DEC_SP(1);
4346 
4347  return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4348 }
4349 
4350 static VALUE
4351 vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4352 {
4353  RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4354  const struct rb_callinfo *ci = calling->cd->ci;
4355  int flags = VM_CALL_FCALL;
4356  VALUE sym;
4357 
4358  VALUE argv_ary;
4359  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4360  if (UNLIKELY(argv_ary = calling->heap_argv)) {
4361  sym = rb_ary_shift(argv_ary);
4362  flags |= VM_CALL_ARGS_SPLAT;
4363  if (calling->kw_splat) {
4364  VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4365  ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4366  calling->kw_splat = 0;
4367  }
4368  return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4369  }
4370 
4371  if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4372  return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4373 }
4374 
4375 static VALUE
4376 vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4377 {
4378  RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4379  return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4380 }
4381 
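/* Optimized dispatch for Kernel#send. Simple shapes such as
 * `obj.send(:m, 1, 2)` only need the method-name argument shifted off the
 * stack (vm_call_opt_send_simple); forwarding and certain splat/keyword
 * shapes, e.g. `obj.send(:m, *args, **kw)`, go through
 * vm_call_opt_send_complex, which may spill the arguments to a heap array. */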
4382 static VALUE
4383 vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4384 {
4385  RB_DEBUG_COUNTER_INC(ccf_opt_send);
4386 
4387  const struct rb_callinfo *ci = calling->cd->ci;
4388  int flags = vm_ci_flag(ci);
4389 
4390  if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4391  ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4392  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4393  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4394  CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4395  return vm_call_opt_send_complex(ec, reg_cfp, calling);
4396  }
4397 
4398  CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4399  return vm_call_opt_send_simple(ec, reg_cfp, calling);
4400 }
4401 
4402 static VALUE
4403 vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4404  const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4405 {
4406  RB_DEBUG_COUNTER_INC(ccf_method_missing);
4407 
4408  VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4409  unsigned int argc, flag;
4410 
4411  flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4412  argc = ++calling->argc;
4413 
4414  /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4415  CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4416  vm_check_canary(ec, reg_cfp->sp);
4417  if (argc > 1) {
4418  MEMMOVE(argv+1, argv, VALUE, argc-1);
4419  }
4420  argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4421  INC_SP(1);
4422 
4423  ec->method_missing_reason = reason;
4424 
4425  struct rb_forwarding_call_data new_fcd = {
4426  .cd = {
4427  .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4428  .cc = NULL,
4429  },
4430  .caller_ci = NULL,
4431  };
4432 
4433  if (!(flag & VM_CALL_FORWARDING)) {
4434  calling->cd = &new_fcd.cd;
4435  }
4436  else {
4437  const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4438  VM_ASSERT((vm_ci_argc(caller_ci), 1));
4439  new_fcd.caller_ci = caller_ci;
4440  calling->cd = (struct rb_call_data *)&new_fcd;
4441  }
4442 
4443  calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4444  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4445  return vm_call_method(ec, reg_cfp, calling);
4446 }
4447 
4448 static VALUE
4449 vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4450 {
4451  return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4452 }
4453 
4454 static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4455 static VALUE
4456 vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4457 {
4458  klass = RCLASS_SUPER(klass);
4459 
4460  const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4461  if (cme == NULL) {
4462  return vm_call_method_nome(ec, cfp, calling);
4463  }
4464  if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4465  cme->def->body.refined.orig_me) {
4466  cme = refined_method_callable_without_refinement(cme);
4467  }
4468 
4469  calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4470 
4471  return vm_call_method_each_type(ec, cfp, calling);
4472 }
4473 
4474 static inline VALUE
4475 find_refinement(VALUE refinements, VALUE klass)
4476 {
4477  if (NIL_P(refinements)) {
4478  return Qnil;
4479  }
4480  return rb_hash_lookup(refinements, klass);
4481 }
4482 
4483 PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4484 static rb_control_frame_t *
4485 current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4486 {
4487  rb_control_frame_t *top_cfp = cfp;
4488 
4489  if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4490  const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4491 
4492  do {
4493  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4494  if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4495  /* TODO: orphan block */
4496  return top_cfp;
4497  }
4498  } while (cfp->iseq != local_iseq);
4499  }
4500  return cfp;
4501 }
4502 
4503 static const rb_callable_method_entry_t *
4504 refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4505 {
4506  const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4507  const rb_callable_method_entry_t *cme;
4508 
4509  if (orig_me->defined_class == 0) {
4510  cme = NULL;
4511  rb_notimplement();
4512  }
4513  else {
4514  cme = (const rb_callable_method_entry_t *)orig_me;
4515  }
4516 
4517  VM_ASSERT(callable_method_entry_p(cme));
4518 
4519  if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4520  cme = NULL;
4521  }
4522 
4523  return cme;
4524 }
4525 
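/* Walks the cref (lexical scope) chain looking for an activated refinement of
 * the method's owner, as in:
 *
 *   module Ext
 *     refine String do
 *       def shout; upcase + "!"; end
 *     end
 *   end
 *   using Ext  # activates the refinement for this scope
 *
 * If an activated refinement defines the method, its entry is used; otherwise
 * the search falls back to the original (pre-refinement) method or to the
 * superclass. */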
4526 static const rb_callable_method_entry_t *
4527 search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4528 {
4529  ID mid = vm_ci_mid(calling->cd->ci);
4530  const rb_cref_t *cref = vm_get_cref(cfp->ep);
4531  const struct rb_callcache * const cc = calling->cc;
4532  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4533 
4534  for (; cref; cref = CREF_NEXT(cref)) {
4535  const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4536  if (NIL_P(refinement)) continue;
4537 
4538  const rb_callable_method_entry_t *const ref_me =
4539  rb_callable_method_entry(refinement, mid);
4540 
4541  if (ref_me) {
4542  if (vm_cc_call(cc) == vm_call_super_method) {
4543  const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4544  const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4545  if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4546  continue;
4547  }
4548  }
4549 
4550  if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4551  cme->def != ref_me->def) {
4552  cme = ref_me;
4553  }
4554  if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4555  return cme;
4556  }
4557  }
4558  else {
4559  return NULL;
4560  }
4561  }
4562 
4563  if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4564  return refined_method_callable_without_refinement(vm_cc_cme(cc));
4565  }
4566  else {
4567  VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4568  const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4569  return cme;
4570  }
4571 }
4572 
4573 static VALUE
4574 vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4575 {
4576  const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4577 
4578  if (ref_cme) {
4579  if (calling->cd->cc) {
4580  const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4581  RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4582  return vm_call_method(ec, cfp, calling);
4583  }
4584  else {
4585  struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4586  calling->cc = ref_cc;
4587  return vm_call_method(ec, cfp, calling);
4588  }
4589  }
4590  else {
4591  return vm_call_method_nome(ec, cfp, calling);
4592  }
4593 }
4594 
4595 static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4596 
4597 NOINLINE(static VALUE
4598  vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4599  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4600 
4601 static VALUE
4602 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4603  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4604 {
4605  int argc = calling->argc;
4606 
4607  /* remove self */
4608  if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4609  DEC_SP(1);
4610 
4611  return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4612 }
4613 
4614 static VALUE
4615 vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4616 {
4617  RB_DEBUG_COUNTER_INC(ccf_opt_call);
4618 
4619  const struct rb_callinfo *ci = calling->cd->ci;
4620  VALUE procval = calling->recv;
4621  return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4622 }
4623 
4624 static VALUE
4625 vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4626 {
4627  RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4628 
4629  VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4630  const struct rb_callinfo *ci = calling->cd->ci;
4631 
4632  if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4633  return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4634  }
4635  else {
4636  calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4637  calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4638  return vm_call_general(ec, reg_cfp, calling);
4639  }
4640 }
4641 
4642 static VALUE
4643 vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4644 {
4645  VALUE recv = calling->recv;
4646 
4647  VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4648  VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4649  VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4650 
4651  const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4652  return internal_RSTRUCT_GET(recv, off);
4653 }
4654 
4655 static VALUE
4656 vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4657 {
4658  RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4659 
4660  VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4661  reg_cfp->sp -= 1;
4662  return ret;
4663 }
4664 
4665 static VALUE
4666 vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4667 {
4668  VALUE recv = calling->recv;
4669 
4670  VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4671  VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4672  VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4673 
4674  rb_check_frozen(recv);
4675 
4676  const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4677  internal_RSTRUCT_SET(recv, off, val);
4678 
4679  return val;
4680 }
4681 
4682 static VALUE
4683 vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4684 {
4685  RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4686 
4687  VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4688  reg_cfp->sp -= 2;
4689  return ret;
4690 }
4691 
4692 NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4693  const struct rb_callinfo *ci, const struct rb_callcache *cc));
4694 
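/* Runs an optimized attribute call while still honoring TracePoint: when
 * c-call/c-return events are enabled the call is bracketed with
 * EXEC_EVENT_HOOK invocations; otherwise `nohook` runs first (typically
 * installing a fastpath) and the call proceeds without event overhead. */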
4695 #define VM_CALL_METHOD_ATTR(var, func, nohook) \
4696  if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4697  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4698  vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4699  var = func; \
4700  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4701  vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4702  } \
4703  else { \
4704  nohook; \
4705  var = func; \
4706  }
4707 
4708 static VALUE
4709 vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4710  const struct rb_callinfo *ci, const struct rb_callcache *cc)
4711 {
4712  switch (vm_cc_cme(cc)->def->body.optimized.type) {
4713  case OPTIMIZED_METHOD_TYPE_SEND:
4714  CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4715  return vm_call_opt_send(ec, cfp, calling);
4716  case OPTIMIZED_METHOD_TYPE_CALL:
4717  CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4718  return vm_call_opt_call(ec, cfp, calling);
4719  case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4720  CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4721  return vm_call_opt_block_call(ec, cfp, calling);
4722  case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4723  CALLER_SETUP_ARG(cfp, calling, ci, 0);
4724  rb_check_arity(calling->argc, 0, 0);
4725 
4726  VALUE v;
4727  VM_CALL_METHOD_ATTR(v,
4728  vm_call_opt_struct_aref(ec, cfp, calling),
4729  set_vm_cc_ivar(cc); \
4730  CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4731  return v;
4732  }
4733  case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4734  CALLER_SETUP_ARG(cfp, calling, ci, 1);
4735  rb_check_arity(calling->argc, 1, 1);
4736 
4737  VALUE v;
4738  VM_CALL_METHOD_ATTR(v,
4739  vm_call_opt_struct_aset(ec, cfp, calling),
4740  set_vm_cc_ivar(cc); \
4741  CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4742  return v;
4743  }
4744  default:
4745  rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4746  }
4747 }
4748 
4749 static VALUE
4750 vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4751 {
4752  const struct rb_callinfo *ci = calling->cd->ci;
4753  const struct rb_callcache *cc = calling->cc;
4754  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4755  VALUE v;
4756 
4757  VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4758 
4759  switch (cme->def->type) {
4760  case VM_METHOD_TYPE_ISEQ:
4761  if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4762  CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4763  return vm_call_iseq_fwd_setup(ec, cfp, calling);
4764  }
4765  else {
4766  CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4767  return vm_call_iseq_setup(ec, cfp, calling);
4768  }
4769 
4770  case VM_METHOD_TYPE_NOTIMPLEMENTED:
4771  case VM_METHOD_TYPE_CFUNC:
4772  CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4773  return vm_call_cfunc(ec, cfp, calling);
4774 
4775  case VM_METHOD_TYPE_ATTRSET:
4776  CALLER_SETUP_ARG(cfp, calling, ci, 1);
4777 
4778  rb_check_arity(calling->argc, 1, 1);
4779 
4780  const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4781 
4782  if (vm_cc_markable(cc)) {
4783  vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4784  VM_CALL_METHOD_ATTR(v,
4785  vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4786  CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4787  }
4788  else {
4789  cc = &((struct rb_callcache) {
4790  .flags = T_IMEMO |
4791  (imemo_callcache << FL_USHIFT) |
4792  VM_CALLCACHE_UNMARKABLE |
4793  VM_CALLCACHE_ON_STACK,
4794  .klass = cc->klass,
4795  .cme_ = cc->cme_,
4796  .call_ = cc->call_,
4797  .aux_ = {
4798  .attr = {
4799  .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4800  }
4801  },
4802  });
4803 
4804  VM_CALL_METHOD_ATTR(v,
4805  vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4806  CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4807  }
4808  return v;
4809 
4810  case VM_METHOD_TYPE_IVAR:
4811  CALLER_SETUP_ARG(cfp, calling, ci, 0);
4812  rb_check_arity(calling->argc, 0, 0);
4813  vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4814  const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4815  VM_CALL_METHOD_ATTR(v,
4816  vm_call_ivar(ec, cfp, calling),
4817  CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4818  return v;
4819 
4820  case VM_METHOD_TYPE_MISSING:
4821  vm_cc_method_missing_reason_set(cc, 0);
4822  CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4823  return vm_call_method_missing(ec, cfp, calling);
4824 
4825  case VM_METHOD_TYPE_BMETHOD:
4826  CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4827  return vm_call_bmethod(ec, cfp, calling);
4828 
4829  case VM_METHOD_TYPE_ALIAS:
4830  CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4831  return vm_call_alias(ec, cfp, calling);
4832 
4833  case VM_METHOD_TYPE_OPTIMIZED:
4834  return vm_call_optimized(ec, cfp, calling, ci, cc);
4835 
4836  case VM_METHOD_TYPE_UNDEF:
4837  break;
4838 
4839  case VM_METHOD_TYPE_ZSUPER:
4840  return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4841 
4842  case VM_METHOD_TYPE_REFINED:
4843  // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4844  // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4845  return vm_call_refined(ec, cfp, calling);
4846  }
4847 
4848  rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4849 }
4850 
4851 NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4852 
4853 static VALUE
4854 vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4855 {
4856  /* method missing */
4857  const struct rb_callinfo *ci = calling->cd->ci;
4858  const int stat = ci_missing_reason(ci);
4859 
4860  if (vm_ci_mid(ci) == idMethodMissing) {
4861  if (UNLIKELY(calling->heap_argv)) {
4862  vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4863  }
4864  else {
4865  rb_control_frame_t *reg_cfp = cfp;
4866  VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4867  vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4868  }
4869  }
4870  else {
4871  return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4872  }
4873 }
4874 
4875 /* Protected method calls and super invocations need to check that the receiver
4876  * (self for super) inherits the module on which the method is defined.
4877  * In the case of refinements, it should consider the original class, not the
4878  * refinement.
4879  */
4880 static VALUE
4881 vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4882 {
4883  VALUE defined_class = me->defined_class;
4884  VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4885  return NIL_P(refined_class) ? defined_class : refined_class;
4886 }
4887 
4888 static inline VALUE
4889 vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4890 {
4891  const struct rb_callinfo *ci = calling->cd->ci;
4892  const struct rb_callcache *cc = calling->cc;
4893 
4894  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4895 
4896  if (vm_cc_cme(cc) != NULL) {
4897  switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4898  case METHOD_VISI_PUBLIC: /* likely */
4899  return vm_call_method_each_type(ec, cfp, calling);
4900 
4901  case METHOD_VISI_PRIVATE:
4902  if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4903  enum method_missing_reason stat = MISSING_PRIVATE;
4904  if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4905 
4906  vm_cc_method_missing_reason_set(cc, stat);
4907  CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4908  return vm_call_method_missing(ec, cfp, calling);
4909  }
4910  return vm_call_method_each_type(ec, cfp, calling);
4911 
4912  case METHOD_VISI_PROTECTED:
4913  if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4914  VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4915  if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4916  vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4917  return vm_call_method_missing(ec, cfp, calling);
4918  }
4919  else {
4920  /* caching method info to dummy cc */
4921  VM_ASSERT(vm_cc_cme(cc) != NULL);
4922  struct rb_callcache cc_on_stack = *cc;
4923  FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4924  calling->cc = &cc_on_stack;
4925  return vm_call_method_each_type(ec, cfp, calling);
4926  }
4927  }
4928  return vm_call_method_each_type(ec, cfp, calling);
4929 
4930  default:
4931  rb_bug("unreachable");
4932  }
4933  }
4934  else {
4935  return vm_call_method_nome(ec, cfp, calling);
4936  }
4937 }
4938 
4939 static VALUE
4940 vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4941 {
4942  RB_DEBUG_COUNTER_INC(ccf_general);
4943  return vm_call_method(ec, reg_cfp, calling);
4944 }
4945 
4946 void
4947 rb_vm_cc_general(const struct rb_callcache *cc)
4948 {
4949  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4950  VM_ASSERT(cc != vm_cc_empty());
4951 
4952  *(vm_call_handler *)&cc->call_ = vm_call_general;
4953 }
4954 
4955 static VALUE
4956 vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4957 {
4958  RB_DEBUG_COUNTER_INC(ccf_super_method);
4959 
4960  // This check is introduced to make this function different from `vm_call_general`, because some
4961  // compilers (we observed this with VC) can merge identical functions, so that their addresses become the same.
4962  // The address of `vm_call_super_method` is used in `search_refined_method`, so it must stay distinct.
4963  if (ec == NULL) rb_bug("unreachable");
4964 
4965  /* this check is required to distinguish this function from the other call handlers. */
4966  VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4967  return vm_call_method(ec, reg_cfp, calling);
4968 }
4969 
4970 /* super */
4971 
4972 static inline VALUE
4973 vm_search_normal_superclass(VALUE klass)
4974 {
4975  if (BUILTIN_TYPE(klass) == T_ICLASS &&
4976  RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4977  FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4978  klass = RBASIC(klass)->klass;
4979  }
4980  klass = RCLASS_ORIGIN(klass);
4981  return RCLASS_SUPER(klass);
4982 }
4983 
4984 NORETURN(static void vm_super_outside(void));
4985 
4986 static void
4987 vm_super_outside(void)
4988 {
4989  rb_raise(rb_eNoMethodError, "super called outside of method");
4990 }
4991 
4992 static const struct rb_callcache *
4993 empty_cc_for_super(void)
4994 {
4995  return &vm_empty_cc_for_super;
4996 }
4997 
4998 static const struct rb_callcache *
4999 vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
5000 {
5001  VALUE current_defined_class;
5002  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5003 
5004  if (!me) {
5005  vm_super_outside();
5006  }
5007 
5008  current_defined_class = vm_defined_class_for_protected_call(me);
5009 
5010  if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5011  reg_cfp->iseq != method_entry_iseqptr(me) &&
5012  !rb_obj_is_kind_of(recv, current_defined_class)) {
5013  VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5014  RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5015 
5016  if (m) { /* not bound UnboundMethod */
5018  "self has wrong type to call super in this context: "
5019  "%"PRIsVALUE" (expected %"PRIsVALUE")",
5020  rb_obj_class(recv), m);
5021  }
5022  }
5023 
5024  if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5026  "implicit argument passing of super from method defined"
5027  " by define_method() is not supported."
5028  " Specify all arguments explicitly.");
5029  }
5030 
5031  ID mid = me->def->original_id;
5032 
5033  if (!vm_ci_markable(cd->ci)) {
5034  VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5035  }
5036  else {
5037  // update iseq. really? (TODO)
5038  cd->ci = vm_ci_new_runtime(mid,
5039  vm_ci_flag(cd->ci),
5040  vm_ci_argc(cd->ci),
5041  vm_ci_kwarg(cd->ci));
5042 
5043  RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5044  }
5045 
5046  const struct rb_callcache *cc;
5047 
5048  VALUE klass = vm_search_normal_superclass(me->defined_class);
5049 
5050  if (!klass) {
5051  /* bound instance method of module */
5052  cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5053  RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5054  }
5055  else {
5056  cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5057  const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5058 
5059  // define_method can cache an entry for a different method id
5060  if (cached_cme == NULL) {
5061  // empty_cc_for_super is not a markable object
5062  cd->cc = empty_cc_for_super();
5063  }
5064  else if (cached_cme->called_id != mid) {
5065  const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5066  if (cme) {
5067  cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5068  RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5069  }
5070  else {
5071  cd->cc = cc = empty_cc_for_super();
5072  }
5073  }
5074  else {
5075  switch (cached_cme->def->type) {
5076  // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5077  case VM_METHOD_TYPE_REFINED:
5078  // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5079  case VM_METHOD_TYPE_ATTRSET:
5080  case VM_METHOD_TYPE_IVAR:
5081  vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5082  break;
5083  default:
5084  break; // use fastpath
5085  }
5086  }
5087  }
5088 
5089  VM_ASSERT((vm_cc_cme(cc), true));
5090 
5091  return cc;
5092 }
5093 
5094 /* yield */
5095 
5096 static inline int
5097 block_proc_is_lambda(const VALUE procval)
5098 {
5099  rb_proc_t *proc;
5100 
5101  if (procval) {
5102  GetProcPtr(procval, proc);
5103  return proc->is_lambda;
5104  }
5105  else {
5106  return 0;
5107  }
5108 }
5109 
5110 static VALUE
5111 vm_yield_with_cfunc(rb_execution_context_t *ec,
5112  const struct rb_captured_block *captured,
5113  VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5114  const rb_callable_method_entry_t *me)
5115 {
5116  int is_lambda = FALSE; /* TODO */
5117  VALUE val, arg, blockarg;
5118  int frame_flag;
5119  const struct vm_ifunc *ifunc = captured->code.ifunc;
5120 
5121  if (is_lambda) {
5122  arg = rb_ary_new4(argc, argv);
5123  }
5124  else if (argc == 0) {
5125  arg = Qnil;
5126  }
5127  else {
5128  arg = argv[0];
5129  }
5130 
5131  blockarg = rb_vm_bh_to_procval(ec, block_handler);
5132 
5133  frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5134  if (kw_splat) {
5135  frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5136  }
5137 
5138  vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5139  frame_flag,
5140  self,
5141  VM_GUARDED_PREV_EP(captured->ep),
5142  (VALUE)me,
5143  0, ec->cfp->sp, 0, 0);
5144  val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5145  rb_vm_pop_frame(ec);
5146 
5147  return val;
5148 }
5149 
5150 VALUE
5151 rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5152 {
5153  return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5154 }
5155 
5156 static VALUE
5157 vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5158 {
5159  return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5160 }
5161 
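/* Implements the auto-splat of a single array argument for non-lambda blocks
 * that take multiple leading parameters, e.g.:
 *
 *   [[1, 2]].each { |a, b| }  # a = 1, b = 2
 *
 * Elements are copied into the leading parameter slots; if the array is
 * shorter than the parameter list, the caller fills the rest with nil. */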
5162 static inline int
5163 vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5164 {
5165  int i;
5166  long len = RARRAY_LEN(ary);
5167 
5168  CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5169 
5170  for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5171  argv[i] = RARRAY_AREF(ary, i);
5172  }
5173 
5174  return i;
5175 }
5176 
5177 static inline VALUE
5178 vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5179 {
5180  VALUE ary, arg0 = argv[0];
5181  ary = rb_check_array_type(arg0);
5182 #if 0
5183  argv[0] = arg0;
5184 #else
5185  VM_ASSERT(argv[0] == arg0);
5186 #endif
5187  return ary;
5188 }
5189 
5190 static int
5191 vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5192 {
5193  if (rb_simple_iseq_p(iseq)) {
5194  rb_control_frame_t *cfp = ec->cfp;
5195  VALUE arg0;
5196 
5197  CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5198 
5199  if (arg_setup_type == arg_setup_block &&
5200  calling->argc == 1 &&
5201  ISEQ_BODY(iseq)->param.flags.has_lead &&
5202  !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5203  !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5204  calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5205  }
5206 
5207  if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5208  if (arg_setup_type == arg_setup_block) {
5209  if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5210  int i;
5211  CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5212  for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5213  calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5214  }
5215  else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5216  calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5217  }
5218  }
5219  else {
5220  argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5221  }
5222  }
5223 
5224  return 0;
5225  }
5226  else {
5227  return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5228  }
5229 }
5230 
5231 static int
5232 vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5233 {
5234  struct rb_calling_info calling_entry, *calling;
5235 
5236  calling = &calling_entry;
5237  calling->argc = argc;
5238  calling->block_handler = block_handler;
5239  calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5240  calling->recv = Qundef;
5241  calling->heap_argv = 0;
5242  struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5243 
5244  return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5245 }
5246 
5247 /* ruby iseq -> ruby block */
5248 
5249 static VALUE
5250 vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5251  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5252  bool is_lambda, VALUE block_handler)
5253 {
5254  const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5255  const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5256  const int arg_size = ISEQ_BODY(iseq)->param.size;
5257  VALUE * const rsp = GET_SP() - calling->argc;
5258  VALUE * const argv = rsp;
5259  int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5260 
5261  SET_SP(rsp);
5262 
5263  vm_push_frame(ec, iseq,
5264  VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
5265  captured->self,
5266  VM_GUARDED_PREV_EP(captured->ep), 0,
5267  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5268  rsp + arg_size,
5269  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5270 
5271  return Qundef;
5272 }
5273 
5274 static VALUE
5275 vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5276  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5277  MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5278 {
5279  VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5280  int flags = vm_ci_flag(ci);
5281 
5282  if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5283  ((calling->argc == 0) ||
5284  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5285  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5286  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5287  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5288  flags = 0;
5289  if (UNLIKELY(calling->heap_argv)) {
5290 #if VM_ARGC_STACK_MAX < 0
5291  if (RARRAY_LEN(calling->heap_argv) < 1) {
5292  rb_raise(rb_eArgError, "no receiver given");
5293  }
5294 #endif
5295  calling->recv = rb_ary_shift(calling->heap_argv);
5296  // Modify stack to avoid cfp consistency error
5297  reg_cfp->sp++;
5298  reg_cfp->sp[-1] = reg_cfp->sp[-2];
5299  reg_cfp->sp[-2] = calling->recv;
5300  flags |= VM_CALL_ARGS_SPLAT;
5301  }
5302  else {
5303  if (calling->argc < 1) {
5304  rb_raise(rb_eArgError, "no receiver given");
5305  }
5306  calling->recv = TOPN(--calling->argc);
5307  }
5308  if (calling->kw_splat) {
5309  flags |= VM_CALL_KW_SPLAT;
5310  }
5311  }
5312  else {
5313  if (calling->argc < 1) {
5314  rb_raise(rb_eArgError, "no receiver given");
5315  }
5316  calling->recv = TOPN(--calling->argc);
5317  }
5318 
5319  return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5320 }
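
/* vm_invoke_symbol_block above implements Symbol block handlers: the first
 * block argument is shifted off to become the receiver, so
 *
 *   [1, 2].map(&:to_s)   # behaves like [1, 2].map { |x| x.to_s }
 *
 * and invoking the block with no value raises "no receiver given". */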
5321 
5322 static VALUE
5323 vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5324  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5325  MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5326 {
5327  VALUE val;
5328  int argc;
5329  const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5330  CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5331  argc = calling->argc;
5332  val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5333  POPN(argc); /* TODO: should this be popped before the cfunc/yield? */
5334  return val;
5335 }
5336 
5337 static VALUE
5338 vm_proc_to_block_handler(VALUE procval)
5339 {
5340  const struct rb_block *block = vm_proc_block(procval);
5341 
5342  switch (vm_block_type(block)) {
5343  case block_type_iseq:
5344  return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5345  case block_type_ifunc:
5346  return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5347  case block_type_symbol:
5348  return VM_BH_FROM_SYMBOL(block->as.symbol);
5349  case block_type_proc:
5350  return VM_BH_FROM_PROC(block->as.proc);
5351  }
5352  VM_UNREACHABLE(vm_yield_with_proc);
5353  return Qundef;
5354 }
5355 
5356 static VALUE
5357 vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5358  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5359  bool is_lambda, VALUE block_handler)
5360 {
5361  while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5362  VALUE proc = VM_BH_TO_PROC(block_handler);
5363  is_lambda = block_proc_is_lambda(proc);
5364  block_handler = vm_proc_to_block_handler(proc);
5365  }
5366 
5367  return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5368 }
5369 
5370 static inline VALUE
5371 vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5372  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5373  bool is_lambda, VALUE block_handler)
5374 {
5375  VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5376  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5377  bool is_lambda, VALUE block_handler);
5378 
5379  switch (vm_block_handler_type(block_handler)) {
5380  case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5381  case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5382  case block_handler_type_proc: func = vm_invoke_proc_block; break;
5383  case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5384  default: rb_bug("vm_invoke_block: unreachable");
5385  }
5386 
5387  return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5388 }
5389 
5390 static VALUE
5391 vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5392 {
5393  const rb_execution_context_t *ec = GET_EC();
5394  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5395  struct rb_captured_block *captured;
5396 
5397  if (cfp == 0) {
5398  rb_bug("vm_make_proc_with_iseq: unreachable");
5399  }
5400 
5401  captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5402  captured->code.iseq = blockiseq;
5403 
5404  return rb_vm_make_proc(ec, captured, rb_cProc);
5405 }
5406 
5407 static VALUE
5408 vm_once_exec(VALUE iseq)
5409 {
5410  VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5411  return rb_proc_call_with_block(proc, 0, 0, Qnil);
5412 }
5413 
5414 static VALUE
5415 vm_once_clear(VALUE data)
5416 {
5417  union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5418  is->once.running_thread = NULL;
5419  return Qnil;
5420 }
5421 
5422 /* defined insn */
5423 
5424 static bool
5425 check_respond_to_missing(VALUE obj, VALUE v)
5426 {
5427  VALUE args[2];
5428  VALUE r;
5429 
5430  args[0] = obj; args[1] = Qfalse;
5431  r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5432  if (!UNDEF_P(r) && RTEST(r)) {
5433  return true;
5434  }
5435  else {
5436  return false;
5437  }
5438 }
5439 
5440 static bool
5441 vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5442 {
5443  VALUE klass;
5444  enum defined_type type = (enum defined_type)op_type;
5445 
5446  switch (type) {
5447  case DEFINED_IVAR:
5448  return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5449  break;
5450  case DEFINED_GVAR:
5451  return rb_gvar_defined(SYM2ID(obj));
5452  break;
5453  case DEFINED_CVAR: {
5454  const rb_cref_t *cref = vm_get_cref(GET_EP());
5455  klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5456  return rb_cvar_defined(klass, SYM2ID(obj));
5457  break;
5458  }
5459  case DEFINED_CONST:
5460  case DEFINED_CONST_FROM: {
5461  bool allow_nil = type == DEFINED_CONST;
5462  klass = v;
5463  return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5464  break;
5465  }
5466  case DEFINED_FUNC:
5467  klass = CLASS_OF(v);
5468  return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5469  break;
5470  case DEFINED_METHOD:{
5471  VALUE klass = CLASS_OF(v);
5472  const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5473 
5474  if (me) {
5475  switch (METHOD_ENTRY_VISI(me)) {
5476  case METHOD_VISI_PRIVATE:
5477  break;
5478  case METHOD_VISI_PROTECTED:
5479  if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5480  break;
5481  }
5482  case METHOD_VISI_PUBLIC:
5483  return true;
5484  break;
5485  default:
5486  rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5487  }
5488  }
5489  else {
5490  return check_respond_to_missing(obj, v);
5491  }
5492  break;
5493  }
5494  case DEFINED_YIELD:
5495  if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5496  return true;
5497  }
5498  break;
5499  case DEFINED_ZSUPER:
5500  {
5501  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5502 
5503  if (me) {
5504  VALUE klass = vm_search_normal_superclass(me->defined_class);
5505  if (!klass) return false;
5506 
5507  ID id = me->def->original_id;
5508 
5509  return rb_method_boundp(klass, id, 0);
5510  }
5511  }
5512  break;
5513  case DEFINED_REF:
5514  return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5515  default:
5516  rb_bug("unimplemented defined? type (VM)");
5517  break;
5518  }
5519 
5520  return false;
5521 }
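
/* The defined insn pushes its precomputed string operand when vm_defined
 * above returns true, and nil otherwise:
 *
 *   defined?(@ivar)   #=> "instance-variable" or nil
 *   defined?(yield)   #=> "yield" when a block handler is present
 *   defined?(super)   #=> "super" when a superclass method is bound
 */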
5522 
5523 bool
5524 rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5525 {
5526  return vm_defined(ec, reg_cfp, op_type, obj, v);
5527 }
5528 
5529 static const VALUE *
5530 vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5531 {
5532  rb_num_t i;
5533  const VALUE *ep = reg_ep;
5534  for (i = 0; i < lv; i++) {
5535  ep = GET_PREV_EP(ep);
5536  }
5537  return ep;
5538 }
5539 
5540 static VALUE
5541 vm_get_special_object(const VALUE *const reg_ep,
5542  enum vm_special_object_type type)
5543 {
5544  switch (type) {
5545  case VM_SPECIAL_OBJECT_VMCORE:
5546  return rb_mRubyVMFrozenCore;
5547  case VM_SPECIAL_OBJECT_CBASE:
5548  return vm_get_cbase(reg_ep);
5549  case VM_SPECIAL_OBJECT_CONST_BASE:
5550  return vm_get_const_base(reg_ep);
5551  default:
5552  rb_bug("putspecialobject insn: unknown value_type %d", type);
5553  }
5554 }
5555 
5556 static VALUE
5557 vm_concat_array(VALUE ary1, VALUE ary2st)
5558 {
5559  const VALUE ary2 = ary2st;
5560  VALUE tmp1 = rb_check_to_array(ary1);
5561  VALUE tmp2 = rb_check_to_array(ary2);
5562 
5563  if (NIL_P(tmp1)) {
5564  tmp1 = rb_ary_new3(1, ary1);
5565  }
5566  if (tmp1 == ary1) {
5567  tmp1 = rb_ary_dup(ary1);
5568  }
5569 
5570  if (NIL_P(tmp2)) {
5571  return rb_ary_push(tmp1, ary2);
5572  } else {
5573  return rb_ary_concat(tmp1, tmp2);
5574  }
5575 }
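
/* vm_concat_array above backs the concatarray insn used by splat literals
 * such as [*a, *b]: an operand that cannot be converted with to_a is
 * treated as a single element, so [*1, *[2, 3]] evaluates to [1, 2, 3]. */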
5576 
5577 static VALUE
5578 vm_concat_to_array(VALUE ary1, VALUE ary2st)
5579 {
5580  /* ary1 must be a newly created array */
5581  const VALUE ary2 = ary2st;
5582  VALUE tmp2 = rb_check_to_array(ary2);
5583 
5584  if (NIL_P(tmp2)) {
5585  return rb_ary_push(ary1, ary2);
5586  } else {
5587  return rb_ary_concat(ary1, tmp2);
5588  }
5589 }
5590 
5591 // The YJIT implementation reuses this logic and
5592 // needs a non-static function it can call.
5593 VALUE
5594 rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5595 {
5596  return vm_concat_array(ary1, ary2st);
5597 }
5598 
5599 VALUE
5600 rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5601 {
5602  return vm_concat_to_array(ary1, ary2st);
5603 }
5604 
5605 static VALUE
5606 vm_splat_array(VALUE flag, VALUE ary)
5607 {
5608  VALUE tmp = rb_check_to_array(ary);
5609  if (NIL_P(tmp)) {
5610  return rb_ary_new3(1, ary);
5611  }
5612  else if (RTEST(flag)) {
5613  return rb_ary_dup(tmp);
5614  }
5615  else {
5616  return tmp;
5617  }
5618 }
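
/* vm_splat_array above backs the splatarray insn (f(*x), [*x], ...): a
 * non-array operand is wrapped in a one-element array, and the flag asks
 * for a defensive dup when the result may be mutated afterwards. */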
5619 
5620 // The YJIT implementation reuses this logic and
5621 // needs a non-static function it can call.
5622 VALUE
5623 rb_vm_splat_array(VALUE flag, VALUE ary)
5624 {
5625  return vm_splat_array(flag, ary);
5626 }
5627 
5628 static VALUE
5629 vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5630 {
5631  enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5632 
5633  if (flag & VM_CHECKMATCH_ARRAY) {
5634  long i;
5635  const long n = RARRAY_LEN(pattern);
5636 
5637  for (i = 0; i < n; i++) {
5638  VALUE v = RARRAY_AREF(pattern, i);
5639  VALUE c = check_match(ec, v, target, type);
5640 
5641  if (RTEST(c)) {
5642  return c;
5643  }
5644  }
5645  return Qfalse;
5646  }
5647  else {
5648  return check_match(ec, pattern, target, type);
5649  }
5650 }
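
/* vm_check_match above implements the checkmatch insn for case/when:
 * each test is pattern === target. VM_CHECKMATCH_ARRAY is set for
 * splatted patterns, where every element is tried in turn:
 *
 *   case x
 *   when *patterns then ...   # RTEST(pat === x) for each pat
 *   end
 */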
5651 
5652 VALUE
5653 rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5654 {
5655  return vm_check_match(ec, target, pattern, flag);
5656 }
5657 
5658 static VALUE
5659 vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5660 {
5661  const VALUE kw_bits = *(ep - bits);
5662 
5663  if (FIXNUM_P(kw_bits)) {
5664  unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5665  if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5666  return Qfalse;
5667  }
5668  else {
5669  VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5670  if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5671  }
5672  return Qtrue;
5673 }
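
/* vm_check_keyword above backs the checkkeyword insn: kw_bits is a Fixnum
 * bitmap (or a Hash when there are many keywords) marking keywords the
 * caller did not supply. Qtrue means the keyword at idx was given, letting
 * compiled code branch over its default expression; e.g. compute_default in
 *
 *   def m(k: compute_default) = k
 *
 * is only evaluated when this pushes Qfalse. */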
5674 
5675 static void
5676 vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5677 {
5678  if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5679  RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5680  RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5681  RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5682 
5683  switch (flag) {
5684  case RUBY_EVENT_CALL:
5685  RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5686  return;
5687  case RUBY_EVENT_C_CALL:
5688  RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5689  return;
5690  case RUBY_EVENT_RETURN:
5691  RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5692  return;
5693  case RUBY_EVENT_C_RETURN:
5694  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5695  return;
5696  }
5697  }
5698 }
5699 
5700 static VALUE
5701 vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5702 {
5703  if (!rb_const_defined_at(cbase, id)) {
5704  return 0;
5705  }
5706  else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5707  return rb_public_const_get_at(cbase, id);
5708  }
5709  else {
5710  return rb_const_get_at(cbase, id);
5711  }
5712 }
5713 
5714 static VALUE
5715 vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5716 {
5717  if (!RB_TYPE_P(klass, T_CLASS)) {
5718  return 0;
5719  }
5720  else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5721  VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5722 
5723  if (tmp != super) {
5724  rb_raise(rb_eTypeError,
5725  "superclass mismatch for class %"PRIsVALUE"",
5726  rb_id2str(id));
5727  }
5728  else {
5729  return klass;
5730  }
5731  }
5732  else {
5733  return klass;
5734  }
5735 }
5736 
5737 static VALUE
5738 vm_check_if_module(ID id, VALUE mod)
5739 {
5740  if (!RB_TYPE_P(mod, T_MODULE)) {
5741  return 0;
5742  }
5743  else {
5744  return mod;
5745  }
5746 }
5747 
5748 static VALUE
5749 declare_under(ID id, VALUE cbase, VALUE c)
5750 {
5751  rb_set_class_path_string(c, cbase, rb_id2str(id));
5752  rb_const_set(cbase, id, c);
5753  return c;
5754 }
5755 
5756 static VALUE
5757 vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5758 {
5759  /* new class declaration */
5760  VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5761  VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5762  rb_define_alloc_func(c, rb_get_alloc_func(c));
5763  rb_class_inherited(s, c);
5764  return c;
5765 }
5766 
5767 static VALUE
5768 vm_declare_module(ID id, VALUE cbase)
5769 {
5770  /* new module declaration */
5771  return declare_under(id, cbase, rb_module_new());
5772 }
5773 
5774 NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5775 static void
5776 unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5777 {
5778  VALUE name = rb_id2str(id);
5779  VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5780  name, type);
5781  VALUE location = rb_const_source_location_at(cbase, id);
5782  if (!NIL_P(location)) {
5783  rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5784  " previous definition of %"PRIsVALUE" was here",
5785  rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5786  }
5787  rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
5788 }
5789 
5790 static VALUE
5791 vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5792 {
5793  VALUE klass;
5794 
5795  if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5796  rb_raise(rb_eTypeError,
5797  "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5798  rb_obj_class(super));
5799  }
5800 
5801  vm_check_if_namespace(cbase);
5802 
5803  /* find klass */
5804  rb_autoload_load(cbase, id);
5805  if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5806  if (!vm_check_if_class(id, flags, super, klass))
5807  unmatched_redefinition("class", cbase, id, klass);
5808  return klass;
5809  }
5810  else {
5811  return vm_declare_class(id, flags, cbase, super);
5812  }
5813 }
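
/* Reopening an existing class goes through vm_check_if_class above, so a
 * conflicting superclass raises:
 *
 *   class Foo < Array; end
 *   class Foo < Hash; end   #=> TypeError: superclass mismatch for class Foo
 */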
5814 
5815 static VALUE
5816 vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5817 {
5818  VALUE mod;
5819 
5820  vm_check_if_namespace(cbase);
5821  if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5822  if (!vm_check_if_module(id, mod))
5823  unmatched_redefinition("module", cbase, id, mod);
5824  return mod;
5825  }
5826  else {
5827  return vm_declare_module(id, cbase);
5828  }
5829 }
5830 
5831 static VALUE
5832 vm_find_or_create_class_by_id(ID id,
5833  rb_num_t flags,
5834  VALUE cbase,
5835  VALUE super)
5836 {
5837  rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5838 
5839  switch (type) {
5840  case VM_DEFINECLASS_TYPE_CLASS:
5841  /* classdef returns class scope value */
5842  return vm_define_class(id, flags, cbase, super);
5843 
5844  case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5845  /* classdef returns class scope value */
5846  return rb_singleton_class(cbase);
5847 
5848  case VM_DEFINECLASS_TYPE_MODULE:
5849  /* classdef returns class scope value */
5850  return vm_define_module(id, flags, cbase);
5851 
5852  default:
5853  rb_bug("unknown defineclass type: %d", (int)type);
5854  }
5855 }
5856 
5857 static rb_method_visibility_t
5858 vm_scope_visibility_get(const rb_execution_context_t *ec)
5859 {
5860  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5861 
5862  if (!vm_env_cref_by_cref(cfp->ep)) {
5863  return METHOD_VISI_PUBLIC;
5864  }
5865  else {
5866  return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5867  }
5868 }
5869 
5870 static int
5871 vm_scope_module_func_check(const rb_execution_context_t *ec)
5872 {
5873  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5874 
5875  if (!vm_env_cref_by_cref(cfp->ep)) {
5876  return FALSE;
5877  }
5878  else {
5879  return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5880  }
5881 }
5882 
5883 static void
5884 vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5885 {
5886  VALUE klass;
5887  rb_method_visibility_t visi;
5888  rb_cref_t *cref = vm_ec_cref(ec);
5889 
5890  if (is_singleton) {
5891  klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5892  visi = METHOD_VISI_PUBLIC;
5893  }
5894  else {
5895  klass = CREF_CLASS_FOR_DEFINITION(cref);
5896  visi = vm_scope_visibility_get(ec);
5897  }
5898 
5899  if (NIL_P(klass)) {
5900  rb_raise(rb_eTypeError, "no class/module to add method");
5901  }
5902 
5903  rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5904  // Set max_iv_count on the class based on the number of ivar sets in its initialize method
5905  if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5906 
5907  RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
5908  }
5909 
5910  if (!is_singleton && vm_scope_module_func_check(ec)) {
5911  klass = rb_singleton_class(klass);
5912  rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5913  }
5914 }
5915 
5916 static VALUE
5917 vm_invokeblock_i(struct rb_execution_context_struct *ec,
5918  struct rb_control_frame_struct *reg_cfp,
5919  struct rb_calling_info *calling)
5920 {
5921  const struct rb_callinfo *ci = calling->cd->ci;
5922  VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5923 
5924  if (block_handler == VM_BLOCK_HANDLER_NONE) {
5925  rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5926  }
5927  else {
5928  return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5929  }
5930 }
5931 
5932 enum method_explorer_type {
5933  mexp_search_method,
5934  mexp_search_invokeblock,
5935  mexp_search_super,
5936 };
5937 
5938 static inline VALUE
5939 vm_sendish(
5940  struct rb_execution_context_struct *ec,
5941  struct rb_control_frame_struct *reg_cfp,
5942  struct rb_call_data *cd,
5943  VALUE block_handler,
5944  enum method_explorer_type method_explorer
5945 ) {
5946  VALUE val = Qundef;
5947  const struct rb_callinfo *ci = cd->ci;
5948  const struct rb_callcache *cc;
5949  int argc = vm_ci_argc(ci);
5950  VALUE recv = TOPN(argc);
5951  struct rb_calling_info calling = {
5952  .block_handler = block_handler,
5953  .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5954  .recv = recv,
5955  .argc = argc,
5956  .cd = cd,
5957  };
5958 
5959  switch (method_explorer) {
5960  case mexp_search_method:
5961  calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5962  val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5963  break;
5964  case mexp_search_super:
5965  calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5966  val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5967  break;
5968  case mexp_search_invokeblock:
5969  val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5970  break;
5971  }
5972  return val;
5973 }
5974 
5975 VALUE
5976 rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5977 {
5978  stack_check(ec);
5979 
5980  struct rb_forwarding_call_data adjusted_cd;
5981  struct rb_callinfo adjusted_ci;
5982 
5983  VALUE bh;
5984  VALUE val;
5985 
5986  if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5987  bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5988 
5989  val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5990 
5991  if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
5992  RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
5993  }
5994  }
5995  else {
5996  bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
5997  val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
5998  }
5999 
6000  VM_EXEC(ec, val);
6001  return val;
6002 }
6003 
6004 VALUE
6005 rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6006 {
6007  stack_check(ec);
6008  VALUE bh = VM_BLOCK_HANDLER_NONE;
6009  VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6010  VM_EXEC(ec, val);
6011  return val;
6012 }
6013 
6014 VALUE
6015 rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6016 {
6017  stack_check(ec);
6018  struct rb_forwarding_call_data adjusted_cd;
6019  struct rb_callinfo adjusted_ci;
6020 
6021  VALUE bh;
6022  VALUE val;
6023 
6024  if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6025  bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6026 
6027  val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6028 
6029  if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6030  RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6031  }
6032  }
6033  else {
6034  bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6035  val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6036  }
6037 
6038  VM_EXEC(ec, val);
6039  return val;
6040 }
6041 
6042 VALUE
6043 rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6044 {
6045  stack_check(ec);
6046  VALUE bh = VM_BLOCK_HANDLER_NONE;
6047  VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6048  VM_EXEC(ec, val);
6049  return val;
6050 }
6051 
6052 /* object.c */
6053 VALUE rb_nil_to_s(VALUE);
6054 VALUE rb_true_to_s(VALUE);
6055 VALUE rb_false_to_s(VALUE);
6056 /* numeric.c */
6057 VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6058 VALUE rb_fix_to_s(VALUE);
6059 /* variable.c */
6060 VALUE rb_mod_to_s(VALUE);
6061 VALUE rb_mod_name(VALUE);
6062 
6063 static VALUE
6064 vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6065 {
6066  int type = TYPE(recv);
6067  if (type == T_STRING) {
6068  return recv;
6069  }
6070 
6071  const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6072 
6073  switch (type) {
6074  case T_SYMBOL:
6075  if (check_cfunc(vm_cc_cme(cc), rb_sym_to_s)) {
6076  // rb_sym_to_s() allocates a mutable string, but since we are only
6077  // going to use this string for interpolation, it's fine to use the
6078  // frozen string.
6079  return rb_sym2str(recv);
6080  }
6081  break;
6082  case T_MODULE:
6083  case T_CLASS:
6084  if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6085  // rb_mod_to_s() allocates a mutable string, but since we are only
6086  // going to use this string for interpolation, it's fine to use the
6087  // frozen string.
6088  VALUE val = rb_mod_name(recv);
6089  if (NIL_P(val)) {
6090  val = rb_mod_to_s(recv);
6091  }
6092  return val;
6093  }
6094  break;
6095  case T_NIL:
6096  if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6097  return rb_nil_to_s(recv);
6098  }
6099  break;
6100  case T_TRUE:
6101  if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6102  return rb_true_to_s(recv);
6103  }
6104  break;
6105  case T_FALSE:
6106  if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6107  return rb_false_to_s(recv);
6108  }
6109  break;
6110  case T_FIXNUM:
6111  if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6112  return rb_fix_to_s(recv);
6113  }
6114  break;
6115  }
6116  return Qundef;
6117 }
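
/* vm_objtostring above serves the objtostring insn emitted for string
 * interpolation ("#{recv}"): when the receiver's to_s is still the default
 * C implementation for its type, the conversion is performed directly and
 * the method call is skipped; Qundef falls back to normal dispatch. */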
6118 
6119 static VALUE
6120 vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6121 {
6122  if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6123  return ary;
6124  }
6125  else {
6126  return Qundef;
6127  }
6128 }
6129 
6130 static VALUE
6131 vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6132 {
6133  if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6134  return hash;
6135  }
6136  else {
6137  return Qundef;
6138  }
6139 }
6140 
6141 static VALUE
6142 vm_opt_str_freeze(VALUE str, int bop, ID id)
6143 {
6144  if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6145  return str;
6146  }
6147  else {
6148  return Qundef;
6149  }
6150 }
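
/* The three helpers above implement opt_ary_freeze, opt_hash_freeze and
 * opt_str_freeze: for a literal receiver such as "foo".freeze the compiler
 * embeds a frozen object as an operand, and while #freeze is unredefined
 * that object is returned without issuing any call. */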
6151 
6152 /* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6153 #define id_cmp idCmp
6154 
6155 static VALUE
6156 vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6157 {
6158  if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6159  if (num == 0) {
6160  return Qnil;
6161  }
6162  else {
6163  VALUE result = *ptr;
6164  rb_snum_t i = num - 1;
6165  while (i-- > 0) {
6166  const VALUE v = *++ptr;
6167  if (OPTIMIZED_CMP(v, result) > 0) {
6168  result = v;
6169  }
6170  }
6171  return result;
6172  }
6173  }
6174  else {
6175  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6176  }
6177 }
6178 
6179 VALUE
6180 rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6181 {
6182  return vm_opt_newarray_max(ec, num, ptr);
6183 }
6184 
6185 static VALUE
6186 vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6187 {
6188  if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6189  if (num == 0) {
6190  return Qnil;
6191  }
6192  else {
6193  VALUE result = *ptr;
6194  rb_snum_t i = num - 1;
6195  while (i-- > 0) {
6196  const VALUE v = *++ptr;
6197  if (OPTIMIZED_CMP(v, result) < 0) {
6198  result = v;
6199  }
6200  }
6201  return result;
6202  }
6203  }
6204  else {
6205  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6206  }
6207 }
6208 
6209 VALUE
6210 rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6211 {
6212  return vm_opt_newarray_min(ec, num, ptr);
6213 }
6214 
6215 static VALUE
6216 vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6217 {
6218  // If Array#hash is _not_ monkeypatched, use the optimized call
6219  if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6220  return rb_ary_hash_values(num, ptr);
6221  }
6222  else {
6223  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6224  }
6225 }
6226 
6227 VALUE
6228 rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6229 {
6230  return vm_opt_newarray_hash(ec, num, ptr);
6231 }
6232 
6233 VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6234 VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6235 
6236 static VALUE
6237 vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6238 {
6239  if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6240  struct RArray fake_ary;
6241  VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6242  return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6243  }
6244  else {
6245  // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
6246  // Set up an args array with room for the keyword hash.
6247  VALUE args[2];
6248  args[0] = fmt;
6249  int kw_splat = RB_NO_KEYWORDS;
6250  int argc = 1;
6251 
6252  if (!UNDEF_P(buffer)) {
6253  args[1] = rb_hash_new_with_size(1);
6254  rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6255  kw_splat = RB_PASS_KEYWORDS;
6256  argc++;
6257  }
6258 
6259  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6260  }
6261 }
6262 
6263 VALUE
6264 rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6265 {
6266  return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6267 }
6268 
6269 VALUE
6270 rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6271 {
6272  return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6273 }
6274 
6275 #undef id_cmp
6276 
6277 static void
6278 vm_track_constant_cache(ID id, void *ic)
6279 {
6280  struct rb_id_table *const_cache = GET_VM()->constant_cache;
6281  VALUE lookup_result;
6282  st_table *ics;
6283 
6284  if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6285  ics = (st_table *)lookup_result;
6286  }
6287  else {
6288  ics = st_init_numtable();
6289  rb_id_table_insert(const_cache, id, (VALUE)ics);
6290  }
6291 
6292  st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
6293 }
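
/* vm_track_constant_cache above registers an inline cache in the VM-wide
 * constant_cache table, keyed by constant name (id => st_table of ICs), so
 * that redefining or removing a constant can invalidate every cache that
 * mentions the name without scanning all iseqs. */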
6294 
6295 static void
6296 vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6297 {
6298  RB_VM_LOCK_ENTER();
6299 
6300  for (int i = 0; segments[i]; i++) {
6301  ID id = segments[i];
6302  if (id == idNULL) continue;
6303  vm_track_constant_cache(id, ic);
6304  }
6305 
6306  RB_VM_LOCK_LEAVE();
6307 }
6308 
6309 // For RJIT inlining
6310 static inline bool
6311 vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6312 {
6313  if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6314  VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6315 
6316  return (ic_cref == NULL || // no need to check CREF
6317  ic_cref == vm_get_cref(reg_ep));
6318  }
6319  return false;
6320 }
6321 
6322 static bool
6323 vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6324 {
6325  VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6326  return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6327 }
6328 
6329 // YJIT needs this function to never allocate and never raise
6330 bool
6331 rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6332 {
6333  return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6334 }
6335 
6336 static void
6337 vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6338 {
6339  if (ruby_vm_const_missing_count > 0) {
6340  ruby_vm_const_missing_count = 0;
6341  ic->entry = NULL;
6342  return;
6343  }
6344 
6345  struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6346  RB_OBJ_WRITE(ice, &ice->value, val);
6347  ice->ic_cref = vm_get_const_key_cref(reg_ep);
6348  if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6349  RB_OBJ_WRITE(iseq, &ic->entry, ice);
6350 
6351  RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6352  unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6353  rb_yjit_constant_ic_update(iseq, ic, pos);
6354  rb_rjit_constant_ic_update(iseq, ic, pos);
6355 }
6356 
6357 VALUE
6358 rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6359 {
6360  VALUE val;
6361  const ID *segments = ic->segments;
6362  struct iseq_inline_constant_cache_entry *ice = ic->entry;
6363  if (ice && vm_ic_hit_p(ice, GET_EP())) {
6364  val = ice->value;
6365 
6366  VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6367  }
6368  else {
6369  ruby_vm_constant_cache_misses++;
6370  val = vm_get_ev_const_chain(ec, segments);
6371  vm_ic_track_const_chain(GET_CFP(), ic, segments);
6372  // Undo the PC increment to get the address to this instruction
6373  // INSN_ATTR(width) == 2
6374  vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6375  }
6376  return val;
6377 }
6378 
6379 static VALUE
6380 vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6381 {
6382  rb_thread_t *th = rb_ec_thread_ptr(ec);
6383  rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6384 
6385  again:
6386  if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6387  return is->once.value;
6388  }
6389  else if (is->once.running_thread == NULL) {
6390  VALUE val;
6391  is->once.running_thread = th;
6392  val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6393  RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6394  /* is->once.running_thread is cleared by vm_once_clear() */
6395  is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6396  return val;
6397  }
6398  else if (is->once.running_thread == th) {
6399  /* recursive once */
6400  return vm_once_exec((VALUE)iseq);
6401  }
6402  else {
6403  /* waiting for finish */
6404  RUBY_VM_CHECK_INTS(ec);
6405  rb_thread_schedule();
6406  goto again;
6407  }
6408 }
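
/* vm_once_dispatch above implements the once insn (used e.g. for /#{x}/o
 * regexps): running_thread is NULL before the first run, the executing
 * thread while running (same-thread re-entry simply re-executes), and the
 * RUNNING_THREAD_ONCE_DONE sentinel once the value is cached; any other
 * thread loops on rb_thread_schedule() until then. */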
6409 
6410 static OFFSET
6411 vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6412 {
6413  switch (OBJ_BUILTIN_TYPE(key)) {
6414  case -1:
6415  case T_FLOAT:
6416  case T_SYMBOL:
6417  case T_BIGNUM:
6418  case T_STRING:
6419  if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6420  SYMBOL_REDEFINED_OP_FLAG |
6421  INTEGER_REDEFINED_OP_FLAG |
6422  FLOAT_REDEFINED_OP_FLAG |
6423  NIL_REDEFINED_OP_FLAG |
6424  TRUE_REDEFINED_OP_FLAG |
6425  FALSE_REDEFINED_OP_FLAG |
6426  STRING_REDEFINED_OP_FLAG)) {
6427  st_data_t val;
6428  if (RB_FLOAT_TYPE_P(key)) {
6429  double kval = RFLOAT_VALUE(key);
6430  if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6431  key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6432  }
6433  }
6434  if (rb_hash_stlike_lookup(hash, key, &val)) {
6435  return FIX2LONG((VALUE)val);
6436  }
6437  else {
6438  return else_offset;
6439  }
6440  }
6441  }
6442  return 0;
6443 }
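
/* vm_case_dispatch above consults the CDHASH jump table built when every
 * when-clause is a literal with unredefined ===. Integral floats are
 * normalized first, so
 *
 *   case 1.0
 *   when 1 then :one   # taken: 1 === 1.0 is true
 *   end
 *
 * jumps directly; returning 0 falls back to sequential checkmatch. */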
6444 
6445 NORETURN(static void
6446  vm_stack_consistency_error(const rb_execution_context_t *ec,
6447  const rb_control_frame_t *,
6448  const VALUE *));
6449 static void
6450 vm_stack_consistency_error(const rb_execution_context_t *ec,
6451  const rb_control_frame_t *cfp,
6452  const VALUE *bp)
6453 {
6454  const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6455  const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6456  static const char stack_consistency_error[] =
6457  "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6458 #if defined RUBY_DEVEL
6459  VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6460  rb_str_cat_cstr(mesg, "\n");
6461  rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6462  rb_bug("%s", StringValueCStr(mesg));
6463 #else
6464  rb_bug(stack_consistency_error, nsp, nbp);
6465 #endif
6466 }
6467 
6468 static VALUE
6469 vm_opt_plus(VALUE recv, VALUE obj)
6470 {
6471  if (FIXNUM_2_P(recv, obj) &&
6472  BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6473  return rb_fix_plus_fix(recv, obj);
6474  }
6475  else if (FLONUM_2_P(recv, obj) &&
6476  BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6477  return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6478  }
6479  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6480  return Qundef;
6481  }
6482  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6483  RBASIC_CLASS(obj) == rb_cFloat &&
6484  BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6485  return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6486  }
6487  else if (RBASIC_CLASS(recv) == rb_cString &&
6488  RBASIC_CLASS(obj) == rb_cString &&
6489  BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6490  return rb_str_opt_plus(recv, obj);
6491  }
6492  else if (RBASIC_CLASS(recv) == rb_cArray &&
6493  RBASIC_CLASS(obj) == rb_cArray &&
6494  BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6495  return rb_ary_plus(recv, obj);
6496  }
6497  else {
6498  return Qundef;
6499  }
6500 }
6501 
6502 static VALUE
6503 vm_opt_minus(VALUE recv, VALUE obj)
6504 {
6505  if (FIXNUM_2_P(recv, obj) &&
6506  BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6507  return rb_fix_minus_fix(recv, obj);
6508  }
6509  else if (FLONUM_2_P(recv, obj) &&
6510  BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6511  return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6512  }
6513  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6514  return Qundef;
6515  }
6516  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6517  RBASIC_CLASS(obj) == rb_cFloat &&
6518  BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6519  return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6520  }
6521  else {
6522  return Qundef;
6523  }
6524 }
6525 
6526 static VALUE
6527 vm_opt_mult(VALUE recv, VALUE obj)
6528 {
6529  if (FIXNUM_2_P(recv, obj) &&
6530  BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6531  return rb_fix_mul_fix(recv, obj);
6532  }
6533  else if (FLONUM_2_P(recv, obj) &&
6534  BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6535  return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6536  }
6537  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6538  return Qundef;
6539  }
6540  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6541  RBASIC_CLASS(obj) == rb_cFloat &&
6542  BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6543  return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6544  }
6545  else {
6546  return Qundef;
6547  }
6548 }
6549 
6550 static VALUE
6551 vm_opt_div(VALUE recv, VALUE obj)
6552 {
6553  if (FIXNUM_2_P(recv, obj) &&
6554  BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6555  return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6556  }
6557  else if (FLONUM_2_P(recv, obj) &&
6558  BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6559  return rb_flo_div_flo(recv, obj);
6560  }
6561  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6562  return Qundef;
6563  }
6564  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6565  RBASIC_CLASS(obj) == rb_cFloat &&
6566  BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6567  return rb_flo_div_flo(recv, obj);
6568  }
6569  else {
6570  return Qundef;
6571  }
6572 }
6573 
6574 static VALUE
6575 vm_opt_mod(VALUE recv, VALUE obj)
6576 {
6577  if (FIXNUM_2_P(recv, obj) &&
6578  BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6579  return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6580  }
6581  else if (FLONUM_2_P(recv, obj) &&
6582  BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6583  return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6584  }
6585  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6586  return Qundef;
6587  }
6588  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6589  RBASIC_CLASS(obj) == rb_cFloat &&
6590  BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6591  return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6592  }
6593  else {
6594  return Qundef;
6595  }
6596 }
6597 
6598 static VALUE
6599 vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6600 {
6601  if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6602  VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6603 
6604  if (!UNDEF_P(val)) {
6605  return RBOOL(!RTEST(val));
6606  }
6607  }
6608 
6609  return Qundef;
6610 }
6611 
6612 static VALUE
6613 vm_opt_lt(VALUE recv, VALUE obj)
6614 {
6615  if (FIXNUM_2_P(recv, obj) &&
6616  BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6617  return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6618  }
6619  else if (FLONUM_2_P(recv, obj) &&
6620  BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6621  return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6622  }
6623  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6624  return Qundef;
6625  }
6626  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6627  RBASIC_CLASS(obj) == rb_cFloat &&
6628  BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6629  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6630  return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6631  }
6632  else {
6633  return Qundef;
6634  }
6635 }
6636 
6637 static VALUE
6638 vm_opt_le(VALUE recv, VALUE obj)
6639 {
6640  if (FIXNUM_2_P(recv, obj) &&
6641  BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6642  return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6643  }
6644  else if (FLONUM_2_P(recv, obj) &&
6645  BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6646  return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6647  }
6648  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6649  return Qundef;
6650  }
6651  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6652  RBASIC_CLASS(obj) == rb_cFloat &&
6653  BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6654  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6655  return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6656  }
6657  else {
6658  return Qundef;
6659  }
6660 }
6661 
6662 static VALUE
6663 vm_opt_gt(VALUE recv, VALUE obj)
6664 {
6665  if (FIXNUM_2_P(recv, obj) &&
6666  BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6667  return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6668  }
6669  else if (FLONUM_2_P(recv, obj) &&
6670  BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6671  return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6672  }
6673  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6674  return Qundef;
6675  }
6676  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6677  RBASIC_CLASS(obj) == rb_cFloat &&
6678  BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6679  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6680  return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6681  }
6682  else {
6683  return Qundef;
6684  }
6685 }
6686 
6687 static VALUE
6688 vm_opt_ge(VALUE recv, VALUE obj)
6689 {
6690  if (FIXNUM_2_P(recv, obj) &&
6691  BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6692  return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6693  }
6694  else if (FLONUM_2_P(recv, obj) &&
6695  BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6696  return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6697  }
6698  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6699  return Qundef;
6700  }
6701  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6702  RBASIC_CLASS(obj) == rb_cFloat &&
6703  BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6704  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6705  return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6706  }
6707  else {
6708  return Qundef;
6709  }
6710 }
6711 
6712 
6713 static VALUE
6714 vm_opt_ltlt(VALUE recv, VALUE obj)
6715 {
6716  if (SPECIAL_CONST_P(recv)) {
6717  return Qundef;
6718  }
6719  else if (RBASIC_CLASS(recv) == rb_cString &&
6720  BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6721  if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6722  return rb_str_buf_append(recv, obj);
6723  }
6724  else {
6725  return rb_str_concat(recv, obj);
6726  }
6727  }
6728  else if (RBASIC_CLASS(recv) == rb_cArray &&
6729  BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6730  return rb_ary_push(recv, obj);
6731  }
6732  else {
6733  return Qundef;
6734  }
6735 }
6736 
6737 static VALUE
6738 vm_opt_and(VALUE recv, VALUE obj)
6739 {
6740  // If recv and obj are both fixnums, then the bottom tag bit
6741  // will be 1 on both. 1 & 1 == 1, so the result value will also
6742  // be a fixnum. If either side is *not* a fixnum, then the tag bit
6743  // will be 0, and we return Qundef.
6744  VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6745 
6746  if (FIXNUM_P(ret) &&
6747  BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6748  return ret;
6749  }
6750  else {
6751  return Qundef;
6752  }
6753 }
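
/* Worked example of the tag trick above: INT2FIX(3) = 0b0111 (7) and
 * INT2FIX(5) = 0b1011 (11); 7 & 11 = 0b0011 = INT2FIX(1), and indeed
 * 3 & 5 == 1. If either operand is not a Fixnum its tag bit is 0, so
 * FIXNUM_P(ret) fails and Qundef is returned. */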
6754 
6755 static VALUE
6756 vm_opt_or(VALUE recv, VALUE obj)
6757 {
6758  if (FIXNUM_2_P(recv, obj) &&
6759  BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6760  return recv | obj;
6761  }
6762  else {
6763  return Qundef;
6764  }
6765 }
6766 
6767 static VALUE
6768 vm_opt_aref(VALUE recv, VALUE obj)
6769 {
6770  if (SPECIAL_CONST_P(recv)) {
6771  if (FIXNUM_2_P(recv, obj) &&
6772  BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6773  return rb_fix_aref(recv, obj);
6774  }
6775  return Qundef;
6776  }
6777  else if (RBASIC_CLASS(recv) == rb_cArray &&
6778  BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6779  if (FIXNUM_P(obj)) {
6780  return rb_ary_entry_internal(recv, FIX2LONG(obj));
6781  }
6782  else {
6783  return rb_ary_aref1(recv, obj);
6784  }
6785  }
6786  else if (RBASIC_CLASS(recv) == rb_cHash &&
6787  BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6788  return rb_hash_aref(recv, obj);
6789  }
6790  else {
6791  return Qundef;
6792  }
6793 }
6794 
6795 static VALUE
6796 vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6797 {
6798  if (SPECIAL_CONST_P(recv)) {
6799  return Qundef;
6800  }
6801  else if (RBASIC_CLASS(recv) == rb_cArray &&
6802  BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6803  FIXNUM_P(obj)) {
6804  rb_ary_store(recv, FIX2LONG(obj), set);
6805  return set;
6806  }
6807  else if (RBASIC_CLASS(recv) == rb_cHash &&
6808  BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6809  rb_hash_aset(recv, obj, set);
6810  return set;
6811  }
6812  else {
6813  return Qundef;
6814  }
6815 }
6816 
6817 static VALUE
6818 vm_opt_aref_with(VALUE recv, VALUE key)
6819 {
6820  if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6821  BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6822  rb_hash_compare_by_id_p(recv) == Qfalse &&
6823  !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6824  return rb_hash_aref(recv, key);
6825  }
6826  else {
6827  return Qundef;
6828  }
6829 }
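
/* vm_opt_aref_with above serves opt_aref_with, emitted for hash["literal"]
 * so the key string need not be allocated on each lookup. It bails out for
 * compare_by_identity hashes, which depend on key object identity, and for
 * hashes with a default proc, which would observe the shared key object. */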
6830 
6831 VALUE
6832 rb_vm_opt_aref_with(VALUE recv, VALUE key)
6833 {
6834  return vm_opt_aref_with(recv, key);
6835 }
6836 
6837 static VALUE
6838 vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6839 {
6840  if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6841  BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6842  rb_hash_compare_by_id_p(recv) == Qfalse) {
6843  return rb_hash_aset(recv, key, val);
6844  }
6845  else {
6846  return Qundef;
6847  }
6848 }
6849 
6850 static VALUE
6851 vm_opt_length(VALUE recv, int bop)
6852 {
6853  if (SPECIAL_CONST_P(recv)) {
6854  return Qundef;
6855  }
6856  else if (RBASIC_CLASS(recv) == rb_cString &&
6857  BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6858  if (bop == BOP_EMPTY_P) {
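 /* empty? only needs a zero test, so the cheap byte length suffices;
  * the else branch must compute the character length */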
6859  return LONG2NUM(RSTRING_LEN(recv));
6860  }
6861  else {
6862  return rb_str_length(recv);
6863  }
6864  }
6865  else if (RBASIC_CLASS(recv) == rb_cArray &&
6866  BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6867  return LONG2NUM(RARRAY_LEN(recv));
6868  }
6869  else if (RBASIC_CLASS(recv) == rb_cHash &&
6870  BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6871  return INT2FIX(RHASH_SIZE(recv));
6872  }
6873  else {
6874  return Qundef;
6875  }
6876 }
6877 
6878 static VALUE
6879 vm_opt_empty_p(VALUE recv)
6880 {
6881  switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6882  case Qundef: return Qundef;
6883  case INT2FIX(0): return Qtrue;
6884  default: return Qfalse;
6885  }
6886 }
6887 
6888 VALUE rb_false(VALUE obj);
6889 
6890 static VALUE
6891 vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6892 {
6893  if (NIL_P(recv) &&
6894  BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6895  return Qtrue;
6896  }
6897  else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6898  return Qfalse;
6899  }
6900  else {
6901  return Qundef;
6902  }
6903 }
6904 
6905 static VALUE
6906 fix_succ(VALUE x)
6907 {
6908  switch (x) {
6909  case ~0UL:
6910  /* 0xFFFF_FFFF == INT2FIX(-1)
6911  * `-1.succ` is of course 0. */
6912  return INT2FIX(0);
6913  case RSHIFT(~0UL, 1):
6914  /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6915  * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6916  return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6917  default:
6918  /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6919  * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6920  * == lx*2 + ly*2 + 1
6921  * == (lx*2+1) + (ly*2+1) - 1
6922  * == x + y - 1
6923  *
6924  * Here, if we put y := INT2FIX(1):
6925  *
6926  * == x + INT2FIX(1) - 1
6927  * == x + 2 .
6928  */
6929  return x + 2;
6930  }
6931 }
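
/* Concretely: recv = INT2FIX(2) = 0b101 (5), and 5 + 2 = 7 = INT2FIX(3),
 * so 2.succ == 3 is computed without untagging. */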
6932 
6933 static VALUE
6934 vm_opt_succ(VALUE recv)
6935 {
6936  if (FIXNUM_P(recv) &&
6937  BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
6938  return fix_succ(recv);
6939  }
6940  else if (SPECIAL_CONST_P(recv)) {
6941  return Qundef;
6942  }
6943  else if (RBASIC_CLASS(recv) == rb_cString &&
6944  BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
6945  return rb_str_succ(recv);
6946  }
6947  else {
6948  return Qundef;
6949  }
6950 }
6951 
6952 static VALUE
6953 vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6954 {
6955  if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
6956  return RBOOL(!RTEST(recv));
6957  }
6958  else {
6959  return Qundef;
6960  }
6961 }
6962 
6963 static VALUE
6964 vm_opt_regexpmatch2(VALUE recv, VALUE obj)
6965 {
6966  if (SPECIAL_CONST_P(recv)) {
6967  return Qundef;
6968  }
6969  else if (RBASIC_CLASS(recv) == rb_cString &&
6970  CLASS_OF(obj) == rb_cRegexp &&
6971  BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
6972  return rb_reg_match(obj, recv);
6973  }
6974  else if (RBASIC_CLASS(recv) == rb_cRegexp &&
6975  BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
6976  return rb_reg_match(recv, obj);
6977  }
6978  else {
6979  return Qundef;
6980  }
6981 }
6982 
6983 rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
6984 
6985 NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
6986 
6987 static inline void
6988 vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
6989  rb_event_flag_t pc_events, rb_event_flag_t target_event,
6990  rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
6991 {
6992  rb_event_flag_t event = pc_events & target_event;
6993  VALUE self = GET_SELF();
6994 
6995  VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
6996 
6997  if (event & global_hooks->events) {
6998  /* increment PC because source line is calculated with PC-1 */
6999  reg_cfp->pc++;
7000  vm_dtrace(event, ec);
7001  rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7002  reg_cfp->pc--;
7003  }
7004 
7005  // Load here since global hook above can add and free local hooks
7006  rb_hook_list_t *local_hooks = *local_hooks_ptr;
7007  if (local_hooks != NULL) {
7008  if (event & local_hooks->events) {
7009  /* increment PC because source line is calculated with PC-1 */
7010  reg_cfp->pc++;
7011  rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7012  reg_cfp->pc--;
7013  }
7014  }
7015 }
7016 
7017 #define VM_TRACE_HOOK(target_event, val) do { \
7018  if ((pc_events & (target_event)) & enabled_flags) { \
7019  vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7020  } \
7021 } while (0)
7022 
7023 static VALUE
7024 rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7025 {
7026  VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7027  VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7028  return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7029 }
7030 
7031 static void
7032 vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7033 {
7034  const VALUE *pc = reg_cfp->pc;
7035  rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7036  rb_event_flag_t global_events = enabled_flags;
7037 
7038  if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7039  return;
7040  }
7041  else {
7042  const rb_iseq_t *iseq = reg_cfp->iseq;
7043  VALUE iseq_val = (VALUE)iseq;
7044  size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7045  rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7046  rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7047  rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7048  rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7049  rb_hook_list_t *bmethod_local_hooks = NULL;
7050  rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7051  rb_event_flag_t bmethod_local_events = 0;
7052  const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7053  enabled_flags |= iseq_local_events;
7054 
7055  VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7056 
7057  if (bmethod_frame) {
7058  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7059  VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7060  bmethod_local_hooks = me->def->body.bmethod.hooks;
7061  bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7062  if (bmethod_local_hooks) {
7063  bmethod_local_events = bmethod_local_hooks->events;
7064  }
7065  }
7066 
7067 
7068  if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7069 #if 0
7070  /* disable trace */
7071  /* TODO: incomplete */
7072  rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7073 #else
7074  /* do not disable tracing here: the overhead of
7075  * re-enabling it later outweighs the win
7076  */
7077 #endif
7078  return;
7079  }
7080  else if (ec->trace_arg != NULL) {
7081  /* already tracing */
7082  return;
7083  }
7084  else {
7085  rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7086  /* Note, not considering iseq local events here since the same
7087  * iseq could be used in multiple bmethods. */
7088  rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7089 
7090  if (0) {
7091  ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7092  (int)pos,
7093  (int)pc_events,
7094  RSTRING_PTR(rb_iseq_path(iseq)),
7095  (int)rb_iseq_line_no(iseq, pos),
7096  RSTRING_PTR(rb_iseq_label(iseq)));
7097  }
7098  VM_ASSERT(reg_cfp->pc == pc);
7099  VM_ASSERT(pc_events != 0);
7100 
7101  /* check traces */
7102  if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7103  /* b_call instruction running as a method. Fire call event. */
7104  vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7105  }
7106  VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7107  VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7108  VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7109  VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7110  VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7111  VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7112  if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7113  /* b_return instruction running as a method. Fire return event. */
7114  vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7115  }
7116 
7117  // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7118  // We need the pointer to stay valid in case compaction happens in a trace hook.
7119  //
7120  // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7121  // storage for `rb_method_definition_t` is not on the GC heap.
7122  RB_GC_GUARD(iseq_val);
7123  }
7124  }
7125 }
7126 #undef VM_TRACE_HOOK
7127 
7128 #if VM_CHECK_MODE > 0
7129 NORETURN( NOINLINE( COLDFUNC
7130 void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7131 
7132 void
7133 Init_vm_stack_canary(void)
7134 {
7135  /* This has to be called _after_ our PRNG is properly set up. */
7136  int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7137  vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7138 
7139  vm_stack_canary_was_born = true;
7140  VM_ASSERT(n == 0);
7141 }
7142 
7143 void
7144 rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7145 {
7146  /* Because a method has already been called, why not call
7147  * another one. */
7148  const char *insn = rb_insns_name(i);
7149  VALUE inspection = rb_inspect(c);
7150  const char *str = StringValueCStr(inspection);
7151 
7152  rb_bug("dead canary found at %s: %s", insn, str);
7153 }
7154 
7155 #else
7156 void Init_vm_stack_canary(void) { /* nothing to do */ }
7157 #endif
7158 
7159 
7160 /* a part of the following code is generated by this ruby script:
7161 
7162 16.times{|i|
7163  typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7164  typedef_args.prepend(", ") if i != 0
7165  call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7166  call_args.prepend(", ") if i != 0
7167  puts %Q{
7168 static VALUE
7169 builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7170 {
7171  typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7172  return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7173 }}
7174 }
7175 
7176 puts
7177 puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
7178 16.times{|i|
7179  puts " builtin_invoker#{i},"
7180 }
7181 puts "};"
7182 */
7183 
7184 static VALUE
7185 builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7186 {
7187  typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7188  return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7189 }
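/* Each invoker below reinterprets the stored rb_insn_func_t as a function of
 * the matching fixed arity and spreads argv into an ordinary C call. The call
 * goes through the same signature the builtin was compiled with, so the
 * function-pointer cast round-trip is well-defined. */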
7190 
7191 static VALUE
7192 builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7193 {
7194  typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7195  return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7196 }
7197 
7198 static VALUE
7199 builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7200 {
7201  typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7202  return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7203 }
7204 
7205 static VALUE
7206 builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7207 {
7208  typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7209  return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7210 }
7211 
7212 static VALUE
7213 builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7214 {
7215  typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7216  return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7217 }
7218 
7219 static VALUE
7220 builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7221 {
7222  typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7223  return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7224 }
7225 
7226 static VALUE
7227 builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7228 {
7229  typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7230  return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7231 }
7232 
7233 static VALUE
7234 builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7235 {
7236  typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7237  return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7238 }
7239 
7240 static VALUE
7241 builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7242 {
7243  typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7244  return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7245 }
7246 
7247 static VALUE
7248 builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7249 {
7250  typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7251  return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7252 }
7253 
7254 static VALUE
7255 builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7256 {
7257  typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7258  return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7259 }
7260 
7261 static VALUE
7262 builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7263 {
7264  typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7265  return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7266 }
7267 
7268 static VALUE
7269 builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7270 {
7271  typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7272  return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7273 }
7274 
7275 static VALUE
7276 builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7277 {
7278  typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7279  return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7280 }
7281 
7282 static VALUE
7283 builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7284 {
7285  typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7286  return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7287 }
7288 
7289 static VALUE
7290 builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7291 {
7292  typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7293  return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7294 }
7295 
7296 typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7297 
7298 static builtin_invoker
7299 lookup_builtin_invoker(int argc)
7300 {
7301  static const builtin_invoker invokers[] = {
7302  builtin_invoker0,
7303  builtin_invoker1,
7304  builtin_invoker2,
7305  builtin_invoker3,
7306  builtin_invoker4,
7307  builtin_invoker5,
7308  builtin_invoker6,
7309  builtin_invoker7,
7310  builtin_invoker8,
7311  builtin_invoker9,
7312  builtin_invoker10,
7313  builtin_invoker11,
7314  builtin_invoker12,
7315  builtin_invoker13,
7316  builtin_invoker14,
7317  builtin_invoker15,
7318  };
7319 
7320  return invokers[argc];
7321 }
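/* The table covers arity 0..15 only, and lookup_builtin_invoker() performs no
 * bounds check; builtin arities are fixed at build time, so callers are
 * trusted to pass 0 <= argc <= 15. A defensive variant (a sketch, not what
 * the VM does) would assert before indexing:
 *
 *   VM_ASSERT(0 <= argc && argc < (int)numberof(invokers));
 */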
7322 
7323 static inline VALUE
7324 invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7325 {
7326  const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7327  SETUP_CANARY(canary_p);
7328  rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7329  VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7330  CHECK_CANARY(canary_p, BIN(invokebuiltin));
7331  return ret;
7332 }
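/* The canary is armed only for builtins declared leaf: such a builtin must
 * not push new VM frames or grow the VM stack, and CHECK_CANARY catches a
 * violation of that declaration at runtime. On the Ruby side a builtin opts
 * in via Primitive.attr! :leaf; a hypothetical example (the method and the C
 * function name are illustrative, not from this file):
 *
 *   def tag
 *     Primitive.attr! :leaf
 *     Primitive.cexpr! 'rb_obj_tag_of(self)'
 *   end
 */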
7333 
7334 static VALUE
7335 vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7336 {
7337  return invoke_bf(ec, cfp, bf, argv);
7338 }
7339 
7340 static VALUE
7341 vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7342 {
7343  if (0) { // debug print
7344  fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7345  for (int i=0; i<bf->argc; i++) {
7346  ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7347  }
7348  ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7349  (void *)(uintptr_t)bf->func_ptr);
7350  }
7351 
7352  if (bf->argc == 0) {
7353  return invoke_bf(ec, cfp, bf, NULL);
7354  }
7355  else {
7356  const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7357  return invoke_bf(ec, cfp, bf, argv);
7358  }
7359 }
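/* The argv computation above recovers the frame's local-variable area: the
 * env ends with VM_ENV_DATA_SIZE bookkeeping slots (me/cref, specval, flags)
 * at ep, and the locals sit directly beneath them, so local 0 lives at
 * ep - local_table_size - VM_ENV_DATA_SIZE + 1. Adding start_index skips the
 * leading locals and hands the remaining ones to the builtin as argv. */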
7360 
7361 // for __builtin_inline!()
7362 
7363 VALUE
7364 rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7365 {
7366  const rb_control_frame_t *cfp = ec->cfp;
7367  return cfp->ep[index];
7368 }
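/* rb_vm_lvar_exposed() is the hook through which C code generated from
 * __builtin_inline! (and Primitive.cexpr!/cstmt!) reads the Ruby locals of
 * the frame it was inlined into, by ep-relative index. Roughly, a builtin
 * body like the following (a hypothetical illustration):
 *
 *   def double(x)
 *     Primitive.cexpr! 'rb_int_mul(x, INT2FIX(2))'
 *   end
 *
 * compiles to a C function in which `x` is fetched via
 * rb_vm_lvar_exposed(ec, <index of x>).
 */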