Ruby 3.4.0dev (2024-11-05 revision 348a53415339076afc4a02fcd09f3ae36e9c4c61)
vm_insnhelper.c (348a53415339076afc4a02fcd09f3ae36e9c4c61)
1 /**********************************************************************
2 
3  vm_insnhelper.c - instruction helper functions.
4 
5  $Author$
6 
7  Copyright (C) 2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 #include "ruby/internal/config.h"
12 
13 #include <math.h>
14 
15 #ifdef HAVE_STDATOMIC_H
16  #include <stdatomic.h>
17 #endif
18 
19 #include "constant.h"
20 #include "debug_counter.h"
21 #include "internal.h"
22 #include "internal/class.h"
23 #include "internal/compar.h"
24 #include "internal/hash.h"
25 #include "internal/numeric.h"
26 #include "internal/proc.h"
27 #include "internal/random.h"
28 #include "internal/variable.h"
29 #include "internal/struct.h"
30 #include "variable.h"
31 
32 /* finish iseq array */
33 #include "insns.inc"
34 #include "insns_info.inc"
35 
36 extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
37 extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
38 extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
39 extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
40  int argc, const VALUE *argv, int priv);
41 
42 static const struct rb_callcache vm_empty_cc;
43 static const struct rb_callcache vm_empty_cc_for_super;
44 
45 /* control stack frame */
46 
47 static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
48 
49 VALUE
50 ruby_vm_special_exception_copy(VALUE exc)
51 {
52  VALUE e = rb_obj_clone(exc);
53  rb_obj_copy_ivar(e, exc);
54  return e;
55 }
56 
57 NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
58 static void
59 ec_stack_overflow(rb_execution_context_t *ec, int setup)
60 {
61  VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
62  ec->raised_flag = RAISED_STACKOVERFLOW;
63  if (setup) {
64  VALUE at = rb_ec_backtrace_object(ec);
65  mesg = ruby_vm_special_exception_copy(mesg);
66  rb_ivar_set(mesg, idBt, at);
67  rb_ivar_set(mesg, idBt_locations, at);
68  }
69  ec->errinfo = mesg;
70  EC_JUMP_TAG(ec, TAG_RAISE);
71 }
72 
73 NORETURN(static void vm_stackoverflow(void));
74 
75 static void
76 vm_stackoverflow(void)
77 {
78  ec_stack_overflow(GET_EC(), TRUE);
79 }
80 
81 NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
82 void
83 rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
84 {
85  if (rb_during_gc()) {
86  rb_bug("system stack overflow during GC. Faulty native extension?");
87  }
88  if (crit) {
89  ec->raised_flag = RAISED_STACKOVERFLOW;
90  ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
91  EC_JUMP_TAG(ec, TAG_RAISE);
92  }
93 #ifdef USE_SIGALTSTACK
94  ec_stack_overflow(ec, TRUE);
95 #else
96  ec_stack_overflow(ec, FALSE);
97 #endif
98 }
99 
100 static inline void stack_check(rb_execution_context_t *ec);
101 
102 #if VM_CHECK_MODE > 0
103 static int
104 callable_class_p(VALUE klass)
105 {
106 #if VM_CHECK_MODE >= 2
107  if (!klass) return FALSE;
108  switch (RB_BUILTIN_TYPE(klass)) {
109  default:
110  break;
111  case T_ICLASS:
112  if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
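  /* fall through */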
113  case T_MODULE:
114  return TRUE;
115  }
116  while (klass) {
117  if (klass == rb_cBasicObject) {
118  return TRUE;
119  }
120  klass = RCLASS_SUPER(klass);
121  }
122  return FALSE;
123 #else
124  return klass != 0;
125 #endif
126 }
127 
128 static int
129 callable_method_entry_p(const rb_callable_method_entry_t *cme)
130 {
131  if (cme == NULL) {
132  return TRUE;
133  }
134  else {
135  VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
136 
137  if (callable_class_p(cme->defined_class)) {
138  return TRUE;
139  }
140  else {
141  return FALSE;
142  }
143  }
144 }
145 
146 static void
147 vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
148 {
149  unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
150  enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
151 
152  if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
153  cref_or_me_type = imemo_type(cref_or_me);
154  }
155  if (type & VM_FRAME_FLAG_BMETHOD) {
156  req_me = TRUE;
157  }
158 
159  if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
160  rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
161  }
162  if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
163  rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
164  }
165 
166  if (req_me) {
167  if (cref_or_me_type != imemo_ment) {
168  rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
169  }
170  }
171  else {
172  if (req_cref && cref_or_me_type != imemo_cref) {
173  rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
174  }
175  else { /* cref or Qfalse */
176  if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
177  if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
178  /* ignore */
179  }
180  else {
181  rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
182  }
183  }
184  }
185  }
186 
187  if (cref_or_me_type == imemo_ment) {
188  const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
189 
190  if (!callable_method_entry_p(me)) {
191  rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
192  }
193  }
194 
195  if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
196  VM_ASSERT(iseq == NULL ||
197  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
198  RUBY_VM_NORMAL_ISEQ_P(iseq) // argument error
199  );
200  }
201  else {
202  VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
203  }
204 }
205 
206 static void
207 vm_check_frame(VALUE type,
208  VALUE specval,
209  VALUE cref_or_me,
210  const rb_iseq_t *iseq)
211 {
212  VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
213  VM_ASSERT(FIXNUM_P(type));
214 
215 #define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
216  case magic: \
217  vm_check_frame_detail(type, req_block, req_me, req_cref, \
218  specval, cref_or_me, is_cframe, iseq); \
219  break
220  switch (given_magic) {
221  /* BLK ME CREF CFRAME */
222  CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
223  CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
224  CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
225  CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
226  CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
227  CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
228  CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
229  CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
230  CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
231  default:
232  rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
233  }
234 #undef CHECK
235 }
236 
237 static VALUE vm_stack_canary; /* Initialized later */
238 static bool vm_stack_canary_was_born = false;
239 
240 // Return the index of the instruction right before the given PC.
241 // This is needed because insn_entry advances PC before the insn body.
242 static unsigned int
243 previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
244 {
245  unsigned int pos = 0;
246  while (pos < ISEQ_BODY(iseq)->iseq_size) {
247  int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
248  unsigned int next_pos = pos + insn_len(opcode);
249  if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
250  return pos;
251  }
252  pos = next_pos;
253  }
254  rb_bug("failed to find the previous insn");
255 }
256 
257 void
258 rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
259 {
260  const struct rb_control_frame_struct *reg_cfp = ec->cfp;
261  const struct rb_iseq_struct *iseq;
262 
263  if (! LIKELY(vm_stack_canary_was_born)) {
264  return; /* :FIXME: isn't it rather fatal to enter this branch? */
265  }
266  else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
267  /* This is at the very beginning of a thread. cfp does not exist. */
268  return;
269  }
270  else if (! (iseq = GET_ISEQ())) {
271  return;
272  }
273  else if (LIKELY(sp[0] != vm_stack_canary)) {
274  return;
275  }
276  else {
277  /* we are going to call methods below; squash the canary to
278  * prevent infinite loop. */
279  sp[0] = Qundef;
280  }
281 
282  const VALUE *orig = rb_iseq_original_iseq(iseq);
283  const VALUE iseqw = rb_iseqw_new(iseq);
284  const VALUE inspection = rb_inspect(iseqw);
285  const char *stri = rb_str_to_cstr(inspection);
286  const VALUE disasm = rb_iseq_disasm(iseq);
287  const char *strd = rb_str_to_cstr(disasm);
288  const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
289  const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
290  const char *name = insn_name(insn);
291 
292  /* rb_bug() is not capable of outputting contents this large. It
293  is designed to run from a SIGSEGV handler, which tends to be
294  very restricted. */
295  ruby_debug_printf(
296  "We are killing the stack canary set by %s, "
297  "at %s@pc=%"PRIdPTR"\n"
298  "watch out the C stack trace.\n"
299  "%s",
300  name, stri, pos, strd);
301  rb_bug("see above.");
302 }
303 #define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
304 
305 #else
306 #define vm_check_canary(ec, sp)
307 #define vm_check_frame(a, b, c, d)
308 #endif /* VM_CHECK_MODE > 0 */
309 
310 #if USE_DEBUG_COUNTER
311 static void
312 vm_push_frame_debug_counter_inc(
313  const struct rb_execution_context_struct *ec,
314  const struct rb_control_frame_struct *reg_cfp,
315  VALUE type)
316 {
317  const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
318 
319  RB_DEBUG_COUNTER_INC(frame_push);
320 
321  if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
322  const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
323  const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
324  if (prev) {
325  if (curr) {
326  RB_DEBUG_COUNTER_INC(frame_R2R);
327  }
328  else {
329  RB_DEBUG_COUNTER_INC(frame_R2C);
330  }
331  }
332  else {
333  if (curr) {
334  RB_DEBUG_COUNTER_INC(frame_C2R);
335  }
336  else {
337  RB_DEBUG_COUNTER_INC(frame_C2C);
338  }
339  }
340  }
341 
342  switch (type & VM_FRAME_MAGIC_MASK) {
343  case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
344  case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
345  case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
346  case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
347  case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
348  case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
349  case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
350  case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
351  case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
352  }
353 
354  rb_bug("unreachable");
355 }
356 #else
357 #define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
358 #endif
359 
360 // Return a poison value to be set above the stack top to verify leafness.
361 VALUE
362 rb_vm_stack_canary(void)
363 {
364 #if VM_CHECK_MODE > 0
365  return vm_stack_canary;
366 #else
367  return 0;
368 #endif
369 }
370 
371 STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
372 STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
373 STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
374 
375 static void
376 vm_push_frame(rb_execution_context_t *ec,
377  const rb_iseq_t *iseq,
378  VALUE type,
379  VALUE self,
380  VALUE specval,
381  VALUE cref_or_me,
382  const VALUE *pc,
383  VALUE *sp,
384  int local_size,
385  int stack_max)
386 {
387  rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
388 
389  vm_check_frame(type, specval, cref_or_me, iseq);
390  VM_ASSERT(local_size >= 0);
391 
392  /* check stack overflow */
393  CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
394  vm_check_canary(ec, sp);
395 
396  /* setup vm value stack */
397 
398  /* initialize local variables */
399  for (int i=0; i < local_size; i++) {
400  *sp++ = Qnil;
401  }
402 
403  /* setup ep with managing data */
404  *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
405  *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
406  *sp++ = type; /* ep[-0] / ENV_FLAGS */
407 
408  /* setup new frame */
409  *cfp = (const struct rb_control_frame_struct) {
410  .pc = pc,
411  .sp = sp,
412  .iseq = iseq,
413  .self = self,
414  .ep = sp - 1,
415  .block_code = NULL,
416 #if VM_DEBUG_BP_CHECK
417  .bp_check = sp,
418 #endif
419  .jit_return = NULL
420  };
421 
422  /* Ensure the initialization of `*cfp` above never gets reordered with the update of `ec->cfp` below.
423  This is a no-op in all cases we've looked at (https://godbolt.org/z/3oxd1446K), but should guarantee it for all
424  future/untested compilers/platforms. */
425 
426  #if defined HAVE_DECL_ATOMIC_SIGNAL_FENCE && HAVE_DECL_ATOMIC_SIGNAL_FENCE
427  atomic_signal_fence(memory_order_seq_cst);
428  #endif
429 
430  ec->cfp = cfp;
431 
432  if (VMDEBUG == 2) {
433  SDR();
434  }
435  vm_push_frame_debug_counter_inc(ec, cfp, type);
436 }
437 
438 void
439 rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
440 {
441  rb_control_frame_t *cfp = ec->cfp;
442 
443  if (VMDEBUG == 2) SDR();
444 
445  ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
446 }
447 
448 /* return TRUE if the frame is finished */
449 static inline int
450 vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
451 {
452  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
453 
454  if (VMDEBUG == 2) SDR();
455 
456  RUBY_VM_CHECK_INTS(ec);
457  ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
458 
459  return flags & VM_FRAME_FLAG_FINISH;
460 }
461 
462 void
463 rb_vm_pop_frame(rb_execution_context_t *ec)
464 {
465  vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
466 }
467 
468 // Pushes a pseudo-frame whose file name is the given fname.
469 VALUE
470 rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
471 {
472  VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
473  void *ptr = ruby_xcalloc(sizeof(struct rb_iseq_constant_body) + sizeof(struct rb_iseq_struct), 1);
474  rb_imemo_tmpbuf_set_ptr(tmpbuf, ptr);
475 
476  struct rb_iseq_struct *dmy_iseq = (struct rb_iseq_struct *)ptr;
477  struct rb_iseq_constant_body *dmy_body = (struct rb_iseq_constant_body *)&dmy_iseq[1];
478  dmy_iseq->body = dmy_body;
479  dmy_body->type = ISEQ_TYPE_TOP;
480  dmy_body->location.pathobj = fname;
481 
482  vm_push_frame(ec,
483  dmy_iseq, //const rb_iseq_t *iseq,
484  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
485  ec->cfp->self, // VALUE self,
486  VM_BLOCK_HANDLER_NONE, // VALUE specval,
487  Qfalse, // VALUE cref_or_me,
488  NULL, // const VALUE *pc,
489  ec->cfp->sp, // VALUE *sp,
490  0, // int local_size,
491  0); // int stack_max
492 
493  return tmpbuf;
494 }
495 
496 /* method dispatch */
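/* Builds the ArgumentError for arity mismatches. Illustrative messages,
 * per the formatting below: "wrong number of arguments (given 2, expected 1)"
 * when min == max, "... (given 0, expected 1+)" when max is
 * UNLIMITED_ARGUMENTS, and "... (given 5, expected 1..3)" otherwise. */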
497 static inline VALUE
498 rb_arity_error_new(int argc, int min, int max)
499 {
500  VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
501  if (min == max) {
502  /* max is not needed */
503  }
504  else if (max == UNLIMITED_ARGUMENTS) {
505  rb_str_cat_cstr(err_mess, "+");
506  }
507  else {
508  rb_str_catf(err_mess, "..%d", max);
509  }
510  rb_str_cat_cstr(err_mess, ")");
511  return rb_exc_new3(rb_eArgError, err_mess);
512 }
513 
514 void
515 rb_error_arity(int argc, int min, int max)
516 {
517  rb_exc_raise(rb_arity_error_new(argc, min, max));
518 }
519 
520 /* lvar */
521 
522 NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
523 
524 static void
525 vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
526 {
527  /* remember the env value forcibly */
528  rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
529  VM_FORCE_WRITE(&ep[index], v);
530  VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
531  RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
532 }
533 
534 // YJIT assumes this function never runs GC
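// Sketch of the fast path: VM_ENV_FLAG_WB_REQUIRED is assumed to be set once
// the env has escaped to the heap, so unflagged envs can skip the
// write-barrier bookkeeping done in vm_env_write_slowpath().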
535 static inline void
536 vm_env_write(const VALUE *ep, int index, VALUE v)
537 {
538  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
539  if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
540  VM_STACK_ENV_WRITE(ep, index, v);
541  }
542  else {
543  vm_env_write_slowpath(ep, index, v);
544  }
545 }
546 
547 void
548 rb_vm_env_write(const VALUE *ep, int index, VALUE v)
549 {
550  vm_env_write(ep, index, v);
551 }
552 
553 VALUE
554 rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
555 {
556  if (block_handler == VM_BLOCK_HANDLER_NONE) {
557  return Qnil;
558  }
559  else {
560  switch (vm_block_handler_type(block_handler)) {
561  case block_handler_type_iseq:
562  case block_handler_type_ifunc:
563  return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
564  case block_handler_type_symbol:
565  return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
566  case block_handler_type_proc:
567  return VM_BH_TO_PROC(block_handler);
568  default:
569  VM_UNREACHABLE(rb_vm_bh_to_procval);
570  }
571  }
572 }
573 
574 /* svar */
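/* "svar" stores frame-local special variables, e.g. $_ (VM_SVAR_LASTLINE)
 * and $~ (VM_SVAR_BACKREF); any further slots live in the `others` array. */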
575 
576 #if VM_CHECK_MODE > 0
577 static int
578 vm_svar_valid_p(VALUE svar)
579 {
580  if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
581  switch (imemo_type(svar)) {
582  case imemo_svar:
583  case imemo_cref:
584  case imemo_ment:
585  return TRUE;
586  default:
587  break;
588  }
589  }
590  rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
591  return FALSE;
592 }
593 #endif
594 
595 static inline struct vm_svar *
596 lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
597 {
598  VALUE svar;
599 
600  if (lep && (ec == NULL || ec->root_lep != lep)) {
601  svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
602  }
603  else {
604  svar = ec->root_svar;
605  }
606 
607  VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
608 
609  return (struct vm_svar *)svar;
610 }
611 
612 static inline void
613 lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
614 {
615  VM_ASSERT(vm_svar_valid_p((VALUE)svar));
616 
617  if (lep && (ec == NULL || ec->root_lep != lep)) {
618  vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
619  }
620  else {
621  RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
622  }
623 }
624 
625 static VALUE
626 lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
627 {
628  const struct vm_svar *svar = lep_svar(ec, lep);
629 
630  if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
631 
632  switch (key) {
633  case VM_SVAR_LASTLINE:
634  return svar->lastline;
635  case VM_SVAR_BACKREF:
636  return svar->backref;
637  default: {
638  const VALUE ary = svar->others;
639 
640  if (NIL_P(ary)) {
641  return Qnil;
642  }
643  else {
644  return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
645  }
646  }
647  }
648 }
649 
650 static struct vm_svar *
651 svar_new(VALUE obj)
652 {
653  struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
654  *((VALUE *)&svar->lastline) = Qnil;
655  *((VALUE *)&svar->backref) = Qnil;
656  *((VALUE *)&svar->others) = Qnil;
657 
658  return svar;
659 }
660 
661 static void
662 lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
663 {
664  struct vm_svar *svar = lep_svar(ec, lep);
665 
666  if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
667  lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
668  }
669 
670  switch (key) {
671  case VM_SVAR_LASTLINE:
672  RB_OBJ_WRITE(svar, &svar->lastline, val);
673  return;
674  case VM_SVAR_BACKREF:
675  RB_OBJ_WRITE(svar, &svar->backref, val);
676  return;
677  default: {
678  VALUE ary = svar->others;
679 
680  if (NIL_P(ary)) {
681  RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
682  }
683  rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
684  }
685  }
686 }
687 
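/* Decoding of `type` as used below: type == 0 reads an svar slot chosen by
 * `key`; an odd type names a back-ref ($&, $`, $', $+ -- the character is in
 * type >> 1); an even type reads the numbered match $N with N = type >> 1. */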
688 static inline VALUE
689 vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
690 {
691  VALUE val;
692 
693  if (type == 0) {
694  val = lep_svar_get(ec, lep, key);
695  }
696  else {
697  VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
698 
699  if (type & 0x01) {
700  switch (type >> 1) {
701  case '&':
702  val = rb_reg_last_match(backref);
703  break;
704  case '`':
705  val = rb_reg_match_pre(backref);
706  break;
707  case '\'':
708  val = rb_reg_match_post(backref);
709  break;
710  case '+':
711  val = rb_reg_match_last(backref);
712  break;
713  default:
714  rb_bug("unexpected back-ref");
715  }
716  }
717  else {
718  val = rb_reg_nth_match((int)(type >> 1), backref);
719  }
720  }
721  return val;
722 }
723 
724 static inline VALUE
725 vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
726 {
727  VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
728  int nth = 0;
729 
730  if (type & 0x01) {
731  switch (type >> 1) {
732  case '&':
733  case '`':
734  case '\'':
735  break;
736  case '+':
737  return rb_reg_last_defined(backref);
738  default:
739  rb_bug("unexpected back-ref");
740  }
741  }
742  else {
743  nth = (int)(type >> 1);
744  }
745  return rb_reg_nth_defined(nth, backref);
746 }
747 
748 PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
749 static rb_callable_method_entry_t *
750 check_method_entry(VALUE obj, int can_be_svar)
751 {
752  if (obj == Qfalse) return NULL;
753 
754 #if VM_CHECK_MODE > 0
755  if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
756 #endif
757 
758  switch (imemo_type(obj)) {
759  case imemo_ment:
760  return (rb_callable_method_entry_t *)obj;
761  case imemo_cref:
762  return NULL;
763  case imemo_svar:
764  if (can_be_svar) {
765  return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
766  }
767  default:
768 #if VM_CHECK_MODE > 0
769  rb_bug("check_method_entry: svar should not be there:");
770 #endif
771  return NULL;
772  }
773 }
774 
775 const rb_callable_method_entry_t *
776 rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
777 {
778  const VALUE *ep = cfp->ep;
779  rb_callable_method_entry_t *me;
780 
781  while (!VM_ENV_LOCAL_P(ep)) {
782  if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
783  ep = VM_ENV_PREV_EP(ep);
784  }
785 
786  return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
787 }
788 
789 static const rb_iseq_t *
790 method_entry_iseqptr(const rb_callable_method_entry_t *me)
791 {
792  switch (me->def->type) {
793  case VM_METHOD_TYPE_ISEQ:
794  return me->def->body.iseq.iseqptr;
795  default:
796  return NULL;
797  }
798 }
799 
800 static rb_cref_t *
801 method_entry_cref(const rb_callable_method_entry_t *me)
802 {
803  switch (me->def->type) {
804  case VM_METHOD_TYPE_ISEQ:
805  return me->def->body.iseq.cref;
806  default:
807  return NULL;
808  }
809 }
810 
811 #if VM_CHECK_MODE == 0
812 PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
813 #endif
814 static rb_cref_t *
815 check_cref(VALUE obj, int can_be_svar)
816 {
817  if (obj == Qfalse) return NULL;
818 
819 #if VM_CHECK_MODE > 0
820  if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
821 #endif
822 
823  switch (imemo_type(obj)) {
824  case imemo_ment:
825  return method_entry_cref((rb_callable_method_entry_t *)obj);
826  case imemo_cref:
827  return (rb_cref_t *)obj;
828  case imemo_svar:
829  if (can_be_svar) {
830  return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
831  }
832  default:
833 #if VM_CHECK_MODE > 0
834  rb_bug("check_cref: svar should not be there:");
835 #endif
836  return NULL;
837  }
838 }
839 
840 static inline rb_cref_t *
841 vm_env_cref(const VALUE *ep)
842 {
843  rb_cref_t *cref;
844 
845  while (!VM_ENV_LOCAL_P(ep)) {
846  if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
847  ep = VM_ENV_PREV_EP(ep);
848  }
849 
850  return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
851 }
852 
853 static int
854 is_cref(const VALUE v, int can_be_svar)
855 {
856  if (RB_TYPE_P(v, T_IMEMO)) {
857  switch (imemo_type(v)) {
858  case imemo_cref:
859  return TRUE;
860  case imemo_svar:
861  if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
862  default:
863  break;
864  }
865  }
866  return FALSE;
867 }
868 
869 static int
870 vm_env_cref_by_cref(const VALUE *ep)
871 {
872  while (!VM_ENV_LOCAL_P(ep)) {
873  if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
874  ep = VM_ENV_PREV_EP(ep);
875  }
876  return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
877 }
878 
879 static rb_cref_t *
880 cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
881 {
882  const VALUE v = *vptr;
883  rb_cref_t *cref, *new_cref;
884 
885  if (RB_TYPE_P(v, T_IMEMO)) {
886  switch (imemo_type(v)) {
887  case imemo_cref:
888  cref = (rb_cref_t *)v;
889  new_cref = vm_cref_dup(cref);
890  if (parent) {
891  RB_OBJ_WRITE(parent, vptr, new_cref);
892  }
893  else {
894  VM_FORCE_WRITE(vptr, (VALUE)new_cref);
895  }
896  return (rb_cref_t *)new_cref;
897  case imemo_svar:
898  if (can_be_svar) {
899  return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
900  }
901  /* fall through */
902  case imemo_ment:
903  rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
904  default:
905  break;
906  }
907  }
908  return NULL;
909 }
910 
911 static rb_cref_t *
912 vm_cref_replace_with_duplicated_cref(const VALUE *ep)
913 {
914  if (vm_env_cref_by_cref(ep)) {
915  rb_cref_t *cref;
916  VALUE envval;
917 
918  while (!VM_ENV_LOCAL_P(ep)) {
919  envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
920  if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
921  return cref;
922  }
923  ep = VM_ENV_PREV_EP(ep);
924  }
925  envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
926  return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
927  }
928  else {
929  rb_bug("vm_cref_dup: unreachable");
930  }
931 }
932 
933 static rb_cref_t *
934 vm_get_cref(const VALUE *ep)
935 {
936  rb_cref_t *cref = vm_env_cref(ep);
937 
938  if (cref != NULL) {
939  return cref;
940  }
941  else {
942  rb_bug("vm_get_cref: unreachable");
943  }
944 }
945 
946 rb_cref_t *
947 rb_vm_get_cref(const VALUE *ep)
948 {
949  return vm_get_cref(ep);
950 }
951 
952 static rb_cref_t *
953 vm_ec_cref(const rb_execution_context_t *ec)
954 {
955  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
956 
957  if (cfp == NULL) {
958  return NULL;
959  }
960  return vm_get_cref(cfp->ep);
961 }
962 
963 static const rb_cref_t *
964 vm_get_const_key_cref(const VALUE *ep)
965 {
966  const rb_cref_t *cref = vm_get_cref(ep);
967  const rb_cref_t *key_cref = cref;
968 
969  while (cref) {
970  if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
971  RCLASS_EXT(CREF_CLASS(cref))->cloned) {
972  return key_cref;
973  }
974  cref = CREF_NEXT(cref);
975  }
976 
977  /* does not include singleton class */
978  return NULL;
979 }
980 
981 void
982 rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
983 {
984  rb_cref_t *new_cref;
985 
986  while (cref) {
987  if (CREF_CLASS(cref) == old_klass) {
988  new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
989  *new_cref_ptr = new_cref;
990  return;
991  }
992  new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
993  cref = CREF_NEXT(cref);
994  *new_cref_ptr = new_cref;
995  new_cref_ptr = &new_cref->next;
996  }
997  *new_cref_ptr = NULL;
998 }
999 
1000 static rb_cref_t *
1001 vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
1002 {
1003  rb_cref_t *prev_cref = NULL;
1004 
1005  if (ep) {
1006  prev_cref = vm_env_cref(ep);
1007  }
1008  else {
1009  rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
1010 
1011  if (cfp) {
1012  prev_cref = vm_env_cref(cfp->ep);
1013  }
1014  }
1015 
1016  return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
1017 }
1018 
1019 static inline VALUE
1020 vm_get_cbase(const VALUE *ep)
1021 {
1022  const rb_cref_t *cref = vm_get_cref(ep);
1023 
1024  return CREF_CLASS_FOR_DEFINITION(cref);
1025 }
1026 
1027 static inline VALUE
1028 vm_get_const_base(const VALUE *ep)
1029 {
1030  const rb_cref_t *cref = vm_get_cref(ep);
1031 
1032  while (cref) {
1033  if (!CREF_PUSHED_BY_EVAL(cref)) {
1034  return CREF_CLASS_FOR_DEFINITION(cref);
1035  }
1036  cref = CREF_NEXT(cref);
1037  }
1038 
1039  return Qundef;
1040 }
1041 
1042 static inline void
1043 vm_check_if_namespace(VALUE klass)
1044 {
1045  if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
1046  rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
1047  }
1048 }
1049 
1050 static inline void
1051 vm_ensure_not_refinement_module(VALUE self)
1052 {
1053  if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
1054  rb_warn("not defined at the refinement, but at the outer class/module");
1055  }
1056 }
1057 
1058 static inline VALUE
1059 vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
1060 {
1061  return klass;
1062 }
1063 
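/* Constant lookup. With orig_klass == Qnil and allow_nil, search the lexical
 * scope (cref chain) first and then the ancestry of the innermost cbase,
 * e.g. `FOO` inside `class A; def m; FOO; end; end`. Otherwise look only
 * under orig_klass, as for `A::FOO`. */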
1064 static inline VALUE
1065 vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
1066 {
1067  void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
1068  VALUE val;
1069 
1070  if (NIL_P(orig_klass) && allow_nil) {
1071  /* in current lexical scope */
1072  const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
1073  const rb_cref_t *cref;
1074  VALUE klass = Qnil;
1075 
1076  while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
1077  root_cref = CREF_NEXT(root_cref);
1078  }
1079  cref = root_cref;
1080  while (cref && CREF_NEXT(cref)) {
1081  if (CREF_PUSHED_BY_EVAL(cref)) {
1082  klass = Qnil;
1083  }
1084  else {
1085  klass = CREF_CLASS(cref);
1086  }
1087  cref = CREF_NEXT(cref);
1088 
1089  if (!NIL_P(klass)) {
1090  VALUE av, am = 0;
1091  rb_const_entry_t *ce;
1092  search_continue:
1093  if ((ce = rb_const_lookup(klass, id))) {
1094  rb_const_warn_if_deprecated(ce, klass, id);
1095  val = ce->value;
1096  if (UNDEF_P(val)) {
1097  if (am == klass) break;
1098  am = klass;
1099  if (is_defined) return 1;
1100  if (rb_autoloading_value(klass, id, &av, NULL)) return av;
1101  rb_autoload_load(klass, id);
1102  goto search_continue;
1103  }
1104  else {
1105  if (is_defined) {
1106  return 1;
1107  }
1108  else {
1109  if (UNLIKELY(!rb_ractor_main_p())) {
1110  if (!rb_ractor_shareable_p(val)) {
1111  rb_raise(rb_eRactorIsolationError,
1112  "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1113  }
1114  }
1115  return val;
1116  }
1117  }
1118  }
1119  }
1120  }
1121 
1122  /* search self */
1123  if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1124  klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1125  }
1126  else {
1127  klass = CLASS_OF(ec->cfp->self);
1128  }
1129 
1130  if (is_defined) {
1131  return rb_const_defined(klass, id);
1132  }
1133  else {
1134  return rb_const_get(klass, id);
1135  }
1136  }
1137  else {
1138  vm_check_if_namespace(orig_klass);
1139  if (is_defined) {
1140  return rb_public_const_defined_from(orig_klass, id);
1141  }
1142  else {
1143  return rb_public_const_get_from(orig_klass, id);
1144  }
1145  }
1146 }
1147 
1148 VALUE
1149 rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
1150 {
1151  return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
1152 }
1153 
1154 static inline VALUE
1155 vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
1156 {
1157  VALUE val = Qnil;
1158  int idx = 0;
1159  int allow_nil = TRUE;
1160  if (segments[0] == idNULL) {
1161  val = rb_cObject;
1162  idx++;
1163  allow_nil = FALSE;
1164  }
1165  while (segments[idx]) {
1166  ID id = segments[idx++];
1167  val = vm_get_ev_const(ec, val, id, allow_nil, 0);
1168  allow_nil = FALSE;
1169  }
1170  return val;
1171 }
1172 
1173 
1174 static inline VALUE
1175 vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1176 {
1177  VALUE klass;
1178 
1179  if (!cref) {
1180  rb_bug("vm_get_cvar_base: no cref");
1181  }
1182 
1183  while (CREF_NEXT(cref) &&
1184  (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
1185  CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1186  cref = CREF_NEXT(cref);
1187  }
1188  if (top_level_raise && !CREF_NEXT(cref)) {
1189  rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1190  }
1191 
1192  klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1193 
1194  if (NIL_P(klass)) {
1195  rb_raise(rb_eTypeError, "no class variables available");
1196  }
1197  return klass;
1198 }
1199 
1200 ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
1201 static inline void
1202 fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
1203 {
1204  if (is_attr) {
1205  vm_cc_attr_index_set(cc, index, shape_id);
1206  }
1207  else {
1208  vm_ic_attr_index_set(iseq, ic, index, shape_id);
1209  }
1210 }
1211 
1212 #define ractor_incidental_shareable_p(cond, val) \
1213  (!(cond) || rb_ractor_shareable_p(val))
1214 #define ractor_object_incidental_shareable_p(obj, val) \
1215  ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)
1216 
1217 #define ATTR_INDEX_NOT_SET (attr_index_t)-1
1218 
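/* Shape-based inline cache for ivar reads: the cache stores a (shape_id,
 * attr index) pair, so a hit is one comparison plus an array load from
 * ivar_list. A miss falls back to a shape-tree lookup and refills the cache. */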
1219 ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
1220 static inline VALUE
1221 vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
1222 {
1223 #if OPT_IC_FOR_IVAR
1224  VALUE val = Qundef;
1225  shape_id_t shape_id;
1226  VALUE * ivar_list;
1227 
1228  if (SPECIAL_CONST_P(obj)) {
1229  return default_value;
1230  }
1231 
1232 #if SHAPE_IN_BASIC_FLAGS
1233  shape_id = RBASIC_SHAPE_ID(obj);
1234 #endif
1235 
1236  switch (BUILTIN_TYPE(obj)) {
1237  case T_OBJECT:
1238  ivar_list = ROBJECT_IVPTR(obj);
1239  VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1240 
1241 #if !SHAPE_IN_BASIC_FLAGS
1242  shape_id = ROBJECT_SHAPE_ID(obj);
1243 #endif
1244  break;
1245  case T_CLASS:
1246  case T_MODULE:
1247  {
1248  if (UNLIKELY(!rb_ractor_main_p())) {
1249  // For two reasons we can only use the fast path on the main
1250  // ractor.
1251  // First, only the main ractor is allowed to set ivars on classes
1252  // and modules. So we can skip locking.
1253  // Second, other ractors need to check the shareability of the
1254  // values returned from the class ivars.
1255 
1256  if (default_value == Qundef) { // defined?
1257  return rb_ivar_defined(obj, id) ? Qtrue : Qundef;
1258  }
1259  else {
1260  goto general_path;
1261  }
1262  }
1263 
1264  ivar_list = RCLASS_IVPTR(obj);
1265 
1266 #if !SHAPE_IN_BASIC_FLAGS
1267  shape_id = RCLASS_SHAPE_ID(obj);
1268 #endif
1269 
1270  break;
1271  }
1272  default:
1273  if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1274  struct gen_ivtbl *ivtbl;
1275  rb_gen_ivtbl_get(obj, id, &ivtbl);
1276 #if !SHAPE_IN_BASIC_FLAGS
1277  shape_id = ivtbl->shape_id;
1278 #endif
1279  ivar_list = ivtbl->as.shape.ivptr;
1280  }
1281  else {
1282  return default_value;
1283  }
1284  }
1285 
1286  shape_id_t cached_id;
1287  attr_index_t index;
1288 
1289  if (is_attr) {
1290  vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
1291  }
1292  else {
1293  vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
1294  }
1295 
1296  if (LIKELY(cached_id == shape_id)) {
1297  RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1298 
1299  if (index == ATTR_INDEX_NOT_SET) {
1300  return default_value;
1301  }
1302 
1303  val = ivar_list[index];
1304 #if USE_DEBUG_COUNTER
1305  RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1306 
1307  if (RB_TYPE_P(obj, T_OBJECT)) {
1308  RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
1309  }
1310 #endif
1311  RUBY_ASSERT(!UNDEF_P(val));
1312  }
1313  else { // cache miss case
1314 #if USE_DEBUG_COUNTER
1315  if (is_attr) {
1316  if (cached_id != INVALID_SHAPE_ID) {
1317  RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
1318  }
1319  else {
1320  RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
1321  }
1322  }
1323  else {
1324  if (cached_id != INVALID_SHAPE_ID) {
1325  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
1326  }
1327  else {
1328  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
1329  }
1330  }
1331  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1332 
1333  if (RB_TYPE_P(obj, T_OBJECT)) {
1334  RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
1335  }
1336 #endif
1337 
1338  if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
1339  st_table *table = NULL;
1340  switch (BUILTIN_TYPE(obj)) {
1341  case T_CLASS:
1342  case T_MODULE:
1343  table = (st_table *)RCLASS_IVPTR(obj);
1344  break;
1345 
1346  case T_OBJECT:
1347  table = ROBJECT_IV_HASH(obj);
1348  break;
1349 
1350  default: {
1351  struct gen_ivtbl *ivtbl;
1352  if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
1353  table = ivtbl->as.complex.table;
1354  }
1355  break;
1356  }
1357  }
1358 
1359  if (!table || !st_lookup(table, id, &val)) {
1360  val = default_value;
1361  }
1362  }
1363  else {
1364  shape_id_t previous_cached_id = cached_id;
1365  if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
1366  // Refill the inline cache when the shape hint resolved to a
1367  // different cached shape id than the one we started with.
1368  if (cached_id != previous_cached_id) {
1369  fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
1370  }
1371 
1372  if (index == ATTR_INDEX_NOT_SET) {
1373  val = default_value;
1374  }
1375  else {
1376  // We fetched the ivar list above
1377  val = ivar_list[index];
1378  RUBY_ASSERT(!UNDEF_P(val));
1379  }
1380  }
1381  else {
1382  if (is_attr) {
1383  vm_cc_attr_index_initialize(cc, shape_id);
1384  }
1385  else {
1386  vm_ic_attr_index_initialize(ic, shape_id);
1387  }
1388 
1389  val = default_value;
1390  }
1391  }
1392 
1393  }
1394 
1395  if (!UNDEF_P(default_value)) {
1396  RUBY_ASSERT(!UNDEF_P(val));
1397  }
1398 
1399  return val;
1400 
1401 general_path:
1402 #endif /* OPT_IC_FOR_IVAR */
1403  RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1404 
1405  if (is_attr) {
1406  return rb_attr_get(obj, id);
1407  }
1408  else {
1409  return rb_ivar_get(obj, id);
1410  }
1411 }
1412 
1413 static void
1414 populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
1415 {
1416  RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1417 
1418  // Cache population code
1419  if (is_attr) {
1420  vm_cc_attr_index_set(cc, index, next_shape_id);
1421  }
1422  else {
1423  vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
1424  }
1425 }
1426 
1427 ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1428 NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1429 NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1430 
1431 static VALUE
1432 vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1433 {
1434 #if OPT_IC_FOR_IVAR
1435  RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1436 
1437  if (BUILTIN_TYPE(obj) == T_OBJECT) {
1438  rb_check_frozen(obj);
1439 
1440  attr_index_t index = rb_obj_ivar_set(obj, id, val);
1441 
1442  shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
1443 
1444  if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
1445  populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
1446  }
1447 
1448  RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
1449  return val;
1450  }
1451 #endif
1452  return rb_ivar_set(obj, id, val);
1453 }
1454 
1455 static VALUE
1456 vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1457 {
1458  return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1459 }
1460 
1461 static VALUE
1462 vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1463 {
1464  return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1465 }
1466 
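/* Ivar write for generic (non-T_OBJECT) receivers. `dest_shape_id` is the
 * shape expected *after* the write: a cache hit either matches the current
 * shape exactly or is a one-step transition along the edge named by `id`. */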
1467 NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
1468 static VALUE
1469 vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1470 {
1471 #if SHAPE_IN_BASIC_FLAGS
1472  shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1473 #else
1474  shape_id_t shape_id = rb_generic_shape_id(obj);
1475 #endif
1476 
1477  struct gen_ivtbl *ivtbl = 0;
1478 
1479  // Cache hit case
1480  if (shape_id == dest_shape_id) {
1481  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1482  }
1483  else if (dest_shape_id != INVALID_SHAPE_ID) {
1484  rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1485  rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1486 
1487  if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1488  RUBY_ASSERT(index < dest_shape->capacity);
1489  }
1490  else {
1491  return Qundef;
1492  }
1493  }
1494  else {
1495  return Qundef;
1496  }
1497 
1498  rb_gen_ivtbl_get(obj, 0, &ivtbl);
1499 
1500  if (shape_id != dest_shape_id) {
1501 #if SHAPE_IN_BASIC_FLAGS
1502  RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
1503 #else
1504  ivtbl->shape_id = dest_shape_id;
1505 #endif
1506  }
1507 
1508  RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);
1509 
1510  RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1511 
1512  return val;
1513 }
1514 
1515 static inline VALUE
1516 vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
1517 {
1518 #if OPT_IC_FOR_IVAR
1519  switch (BUILTIN_TYPE(obj)) {
1520  case T_OBJECT:
1521  {
1522  VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
1523 
1524  shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
1525  RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);
1526 
1527  if (LIKELY(shape_id == dest_shape_id)) {
1528  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1529  VM_ASSERT(!rb_ractor_shareable_p(obj));
1530  }
1531  else if (dest_shape_id != INVALID_SHAPE_ID) {
1532  rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
1533  rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
1534  shape_id_t source_shape_id = dest_shape->parent_id;
1535 
1536  if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
1537  RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
1538 
1539  ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
1540 
1541  RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
1542  RUBY_ASSERT(index < dest_shape->capacity);
1543  }
1544  else {
1545  break;
1546  }
1547  }
1548  else {
1549  break;
1550  }
1551 
1552  VALUE *ptr = ROBJECT_IVPTR(obj);
1553 
1554  RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
1555  RB_OBJ_WRITE(obj, &ptr[index], val);
1556 
1557  RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1558  RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
1559  return val;
1560  }
1561  break;
1562  case T_CLASS:
1563  case T_MODULE:
1564  RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1565  default:
1566  break;
1567  }
1568 
1569  return Qundef;
1570 #endif /* OPT_IC_FOR_IVAR */
1571 }
1572 
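/* Class-variable inline cache: as checked by the callers below, an entry is
 * valid only while the global cvar state is unchanged, the cref matches, and
 * we are on the main Ractor. */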
1573 static VALUE
1574 update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
1575 {
1576  VALUE defined_class = 0;
1577  VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1578 
1579  if (RB_TYPE_P(defined_class, T_ICLASS)) {
1580  defined_class = RBASIC(defined_class)->klass;
1581  }
1582 
1583  struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1584  if (!rb_cvc_tbl) {
1585  rb_bug("the cvc table should be set");
1586  }
1587 
1588  VALUE ent_data;
1589  if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1590  rb_bug("should have cvar cache entry");
1591  }
1592 
1593  struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1594 
1595  ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1596  ent->cref = cref;
1597  ic->entry = ent;
1598 
1599  RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
1600  RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
1601  RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1602  RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
1603 
1604  return cvar_value;
1605 }
1606 
1607 static inline VALUE
1608 vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1609 {
1610  const rb_cref_t *cref;
1611  cref = vm_get_cref(GET_EP());
1612 
1613  if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1614  RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1615 
1616  VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
1617  RUBY_ASSERT(!UNDEF_P(v));
1618 
1619  return v;
1620  }
1621 
1622  VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1623 
1624  return update_classvariable_cache(iseq, klass, id, cref, ic);
1625 }
1626 
1627 VALUE
1628 rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1629 {
1630  return vm_getclassvariable(iseq, cfp, id, ic);
1631 }
1632 
1633 static inline void
1634 vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1635 {
1636  const rb_cref_t *cref;
1637  cref = vm_get_cref(GET_EP());
1638 
1639  if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
1640  RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1641 
1642  rb_class_ivar_set(ic->entry->class_value, id, val);
1643  return;
1644  }
1645 
1646  VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1647 
1648  rb_cvar_set(klass, id, val);
1649 
1650  update_classvariable_cache(iseq, klass, id, cref, ic);
1651 }
1652 
1653 void
1654 rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1655 {
1656  vm_setclassvariable(iseq, cfp, id, val, ic);
1657 }
1658 
1659 static inline VALUE
1660 vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1661 {
1662  return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
1663 }
1664 
1665 static inline void
1666 vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1667 {
1668  if (RB_SPECIAL_CONST_P(obj)) {
1669  rb_error_frozen_object(obj);
1670  return;
1671  }
1672 
1673  shape_id_t dest_shape_id;
1674  attr_index_t index;
1675  vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
1676 
1677  if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
1678  switch (BUILTIN_TYPE(obj)) {
1679  case T_OBJECT:
1680  case T_CLASS:
1681  case T_MODULE:
1682  break;
1683  default:
1684  if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
1685  return;
1686  }
1687  }
1688  vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1689  }
1690 }
1691 
1692 void
1693 rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1694 {
1695  vm_setinstancevariable(iseq, obj, id, val, ic);
1696 }
1697 
1698 static VALUE
1699 vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1700 {
1701  /* continue throw */
1702 
1703  if (FIXNUM_P(err)) {
1704  ec->tag->state = RUBY_TAG_FATAL;
1705  }
1706  else if (SYMBOL_P(err)) {
1707  ec->tag->state = TAG_THROW;
1708  }
1709  else if (THROW_DATA_P(err)) {
1710  ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1711  }
1712  else {
1713  ec->tag->state = TAG_RAISE;
1714  }
1715  return err;
1716 }
1717 
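/* Handles `throw` for break/retry/return (state != 0). For TAG_BREAK the
 * loop below walks outer frames to the block's owner and consults its catch
 * table; a break whose owner frame is already gone is an orphan, e.g. calling
 * a proc containing `break` after its defining method has returned. */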
1718 static VALUE
1719 vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1720  const int flag, const VALUE throwobj)
1721 {
1722  const rb_control_frame_t *escape_cfp = NULL;
1723  const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1724 
1725  if (flag != 0) {
1726  /* do nothing */
1727  }
1728  else if (state == TAG_BREAK) {
1729  int is_orphan = 1;
1730  const VALUE *ep = GET_EP();
1731  const rb_iseq_t *base_iseq = GET_ISEQ();
1732  escape_cfp = reg_cfp;
1733 
1734  while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
1735  if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1736  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1737  ep = escape_cfp->ep;
1738  base_iseq = escape_cfp->iseq;
1739  }
1740  else {
1741  ep = VM_ENV_PREV_EP(ep);
1742  base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
1743  escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1744  VM_ASSERT(escape_cfp->iseq == base_iseq);
1745  }
1746  }
1747 
1748  if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1749  /* lambda{... break ...} */
1750  is_orphan = 0;
1751  state = TAG_RETURN;
1752  }
1753  else {
1754  ep = VM_ENV_PREV_EP(ep);
1755 
1756  while (escape_cfp < eocfp) {
1757  if (escape_cfp->ep == ep) {
1758  const rb_iseq_t *const iseq = escape_cfp->iseq;
1759  const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
1760  const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
1761  unsigned int i;
1762 
1763  if (!ct) break;
1764  for (i=0; i < ct->size; i++) {
1765  const struct iseq_catch_table_entry *const entry =
1766  UNALIGNED_MEMBER_PTR(ct, entries[i]);
1767 
1768  if (entry->type == CATCH_TYPE_BREAK &&
1769  entry->iseq == base_iseq &&
1770  entry->start < epc && entry->end >= epc) {
1771  if (entry->cont == epc) { /* found! */
1772  is_orphan = 0;
1773  }
1774  break;
1775  }
1776  }
1777  break;
1778  }
1779 
1780  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1781  }
1782  }
1783 
1784  if (is_orphan) {
1785  rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1786  }
1787  }
1788  else if (state == TAG_RETRY) {
1789  const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1790 
1791  escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1792  }
1793  else if (state == TAG_RETURN) {
1794  const VALUE *current_ep = GET_EP();
1795  const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1796  int in_class_frame = 0;
1797  int toplevel = 1;
1798  escape_cfp = reg_cfp;
1799 
1800  // find target_lep, target_ep
1801  while (!VM_ENV_LOCAL_P(ep)) {
1802  if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1803  target_ep = ep;
1804  }
1805  ep = VM_ENV_PREV_EP(ep);
1806  }
1807  target_lep = ep;
1808 
1809  while (escape_cfp < eocfp) {
1810  const VALUE *lep = VM_CF_LEP(escape_cfp);
1811 
1812  if (!target_lep) {
1813  target_lep = lep;
1814  }
1815 
1816  if (lep == target_lep &&
1817  VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1818  ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
1819  in_class_frame = 1;
1820  target_lep = 0;
1821  }
1822 
1823  if (lep == target_lep) {
1824  if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1825  toplevel = 0;
1826  if (in_class_frame) {
1827  /* lambda {class A; ... return ...; end} */
1828  goto valid_return;
1829  }
1830  else {
1831  const VALUE *tep = current_ep;
1832 
1833  while (target_lep != tep) {
1834  if (escape_cfp->ep == tep) {
1835  /* in lambda */
1836  if (tep == target_ep) {
1837  goto valid_return;
1838  }
1839  else {
1840  goto unexpected_return;
1841  }
1842  }
1843  tep = VM_ENV_PREV_EP(tep);
1844  }
1845  }
1846  }
1847  else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1848  switch (ISEQ_BODY(escape_cfp->iseq)->type) {
1849  case ISEQ_TYPE_TOP:
1850  case ISEQ_TYPE_MAIN:
1851  if (toplevel) {
1852  if (in_class_frame) goto unexpected_return;
1853  if (target_ep == NULL) {
1854  goto valid_return;
1855  }
1856  else {
1857  goto unexpected_return;
1858  }
1859  }
1860  break;
1861  case ISEQ_TYPE_EVAL: {
1862  const rb_iseq_t *is = escape_cfp->iseq;
1863  enum rb_iseq_type t = ISEQ_BODY(is)->type;
1864  while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
1865  if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
1866  t = ISEQ_BODY(is)->type;
1867  }
1868  toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
1869  break;
1870  }
1871  case ISEQ_TYPE_CLASS:
1872  toplevel = 0;
1873  break;
1874  default:
1875  break;
1876  }
1877  }
1878  }
1879 
1880  if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
1881  if (target_ep == NULL) {
1882  goto valid_return;
1883  }
1884  else {
1885  goto unexpected_return;
1886  }
1887  }
1888 
1889  escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1890  }
1891  unexpected_return:;
1892  rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1893 
1894  valid_return:;
1895  /* do nothing */
1896  }
1897  else {
1898  rb_bug("isns(throw): unsupported throw type");
1899  }
1900 
1901  ec->tag->state = state;
1902  return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1903 }
1904 
1905 static VALUE
1906 vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1907  rb_num_t throw_state, VALUE throwobj)
1908 {
1909  const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1910  const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1911 
1912  if (state != 0) {
1913  return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1914  }
1915  else {
1916  return vm_throw_continue(ec, throwobj);
1917  }
1918 }
1919 
1920 VALUE
1921 rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
1922 {
1923  return vm_throw(ec, reg_cfp, throw_state, throwobj);
1924 }
1925 
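/* expandarray: flag bit 0x01 requests a splat array for the rest, bit 0x02
 * selects "post" order. Illustrative Ruby: `a, b, *rest = ary` uses num=2
 * with the splat bit; `*rest, x, y = ary` uses the post flag. */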
1926 static inline void
1927 vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
1928 {
1929  int is_splat = flag & 0x01;
1930  const VALUE *ptr;
1931  rb_num_t len;
1932  const VALUE obj = ary;
1933 
1934  if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1935  ary = obj;
1936  ptr = &ary;
1937  len = 1;
1938  }
1939  else {
1940  ptr = RARRAY_CONST_PTR(ary);
1941  len = (rb_num_t)RARRAY_LEN(ary);
1942  }
1943 
1944  if (num + is_splat == 0) {
1945  /* no space left on stack */
1946  }
1947  else if (flag & 0x02) {
1948  /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
1949  rb_num_t i = 0, j;
1950 
1951  if (len < num) {
1952  for (i = 0; i < num - len; i++) {
1953  *cfp->sp++ = Qnil;
1954  }
1955  }
1956 
1957  for (j = 0; i < num; i++, j++) {
1958  VALUE v = ptr[len - j - 1];
1959  *cfp->sp++ = v;
1960  }
1961 
1962  if (is_splat) {
1963  *cfp->sp++ = rb_ary_new4(len - j, ptr);
1964  }
1965  }
1966  else {
1967  /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1968  if (is_splat) {
1969  if (num > len) {
1970  *cfp->sp++ = rb_ary_new();
1971  }
1972  else {
1973  *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
1974  }
1975  }
1976 
1977  if (num > len) {
1978  rb_num_t i = 0;
1979  for (; i < num - len; i++) {
1980  *cfp->sp++ = Qnil;
1981  }
1982 
1983  for (rb_num_t j = 0; i < num; i++, j++) {
1984  *cfp->sp++ = ptr[len - j - 1];
1985  }
1986  }
1987  else {
1988  for (rb_num_t j = 0; j < num; j++) {
1989  *cfp->sp++ = ptr[num - j - 1];
1990  }
1991  }
1992  }
1993 
1994  RB_GC_GUARD(ary);
1995 }
1996 
1997 static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1998 
1999 static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
2000 
2001 static struct rb_class_cc_entries *
2002 vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
2003 {
2004  struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
2005 #if VM_CHECK_MODE > 0
2006  ccs->debug_sig = ~(VALUE)ccs;
2007 #endif
2008  ccs->capa = 0;
2009  ccs->len = 0;
2010  ccs->cme = cme;
2011  METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
2012  ccs->entries = NULL;
2013 
2014  rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
2015  RB_OBJ_WRITTEN(klass, Qundef, cme);
2016  return ccs;
2017 }
2018 
2019 static void
2020 vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
2021 {
2022  if (! vm_cc_markable(cc)) {
2023  return;
2024  }
2025 
2026  if (UNLIKELY(ccs->len == ccs->capa)) {
2027  if (ccs->capa == 0) {
2028  ccs->capa = 1;
2029  ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
2030  }
2031  else {
2032  ccs->capa *= 2;
2033  REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
2034  }
2035  }
2036  VM_ASSERT(ccs->len < ccs->capa);
2037 
2038  const int pos = ccs->len++;
2039  ccs->entries[pos].argc = vm_ci_argc(ci);
2040  ccs->entries[pos].flag = vm_ci_flag(ci);
2041  RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
2042 
2043  if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
2044  // for tuning
2045  // vm_mtbl_dump(klass, 0);
2046  }
2047 }
2048 
2049 #if VM_CHECK_MODE > 0
2050 void
2051 rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
2052 {
2053  ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
2054  for (int i=0; i<ccs->len; i++) {
2055  ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
2056  ccs->entries[i].flag,
2057  ccs->entries[i].argc);
2058  rp(ccs->entries[i].cc);
2059  }
2060 }
2061 
2062 static int
2063 vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
2064 {
2065  VM_ASSERT(vm_ccs_p(ccs));
2066  VM_ASSERT(ccs->len <= ccs->capa);
2067 
2068  for (int i=0; i<ccs->len; i++) {
2069  const struct rb_callcache *cc = ccs->entries[i].cc;
2070 
2071  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2072  VM_ASSERT(vm_cc_class_check(cc, klass));
2073  VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
2074  VM_ASSERT(!vm_cc_super_p(cc));
2075  VM_ASSERT(!vm_cc_refinement_p(cc));
2076  }
2077  return TRUE;
2078 }
2079 #endif
2080 
2081 const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
2082 
2083 static const struct rb_callcache *
2084 vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
2085 {
2086  const ID mid = vm_ci_mid(ci);
2087  struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2088  struct rb_class_cc_entries *ccs = NULL;
2089  VALUE ccs_data;
2090 
2091  if (cc_tbl) {
2092  // CCS data is keyed on method id, so we don't need the method id
2093  // for doing comparisons in the `for` loop below.
2094  if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
2095  ccs = (struct rb_class_cc_entries *)ccs_data;
2096  const int ccs_len = ccs->len;
2097 
2098  if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
2099  rb_vm_ccs_free(ccs);
2100  rb_id_table_delete(cc_tbl, mid);
2101  ccs = NULL;
2102  }
2103  else {
2104  VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
2105 
2106  // We already know the method id is correct because we had
2107  // to look up the ccs_data by method id. All we need to
2108  // compare is argc and flag
2109  unsigned int argc = vm_ci_argc(ci);
2110  unsigned int flag = vm_ci_flag(ci);
2111 
2112  for (int i=0; i<ccs_len; i++) {
2113  unsigned int ccs_ci_argc = ccs->entries[i].argc;
2114  unsigned int ccs_ci_flag = ccs->entries[i].flag;
2115  const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
2116 
2117  VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
2118 
2119  if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
2120  RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
2121 
2122  VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
2123  VM_ASSERT(ccs_cc->klass == klass);
2124  VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
2125 
2126  return ccs_cc;
2127  }
2128  }
2129  }
2130  }
2131  }
2132  else {
2133  cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
2134  }
2135 
2136  RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
2137 
2138  const rb_callable_method_entry_t *cme;
2139 
2140  if (ccs) {
2141  cme = ccs->cme;
2142  cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
2143 
2144  VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2145  }
2146  else {
2147  cme = rb_callable_method_entry(klass, mid);
2148  }
2149 
2150  VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
2151 
2152  if (cme == NULL) {
2153  // undef or not found: can't cache the information
2154  VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
2155  return &vm_empty_cc;
2156  }
2157 
2158  VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
2159 
2160  METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
2161 
2162  if (ccs == NULL) {
2163  VM_ASSERT(cc_tbl != NULL);
2164 
2165  if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
2166  // rb_callable_method_entry() prepares ccs.
2167  ccs = (struct rb_class_cc_entries *)ccs_data;
2168  }
2169  else {
2170  // TODO: required?
2171  ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
2172  }
2173  }
2174 
2175  cme = rb_check_overloaded_cme(cme, ci);
2176 
2177  const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
2178  vm_ccs_push(klass, ccs, ci, cc);
2179 
2180  VM_ASSERT(vm_cc_cme(cc) != NULL);
2181  VM_ASSERT(cme->called_id == mid);
2182  VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
2183 
2184  return cc;
2185 }
2186 
2187 const struct rb_callcache *
2188 rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
2189 {
2190  const struct rb_callcache *cc;
2191 
2192  VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
2193 
2194  RB_VM_LOCK_ENTER();
2195  {
2196  cc = vm_search_cc(klass, ci);
2197 
2198  VM_ASSERT(cc);
2199  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
2200  VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
2201  VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
2202  VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
2203  VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
2204  }
2205  RB_VM_LOCK_LEAVE();
2206 
2207  return cc;
2208 }
2209 
2210 static const struct rb_callcache *
2211 vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2212 {
2213 #if USE_DEBUG_COUNTER
2214  const struct rb_callcache *old_cc = cd->cc;
2215 #endif
2216 
2217  const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
2218 
2219 #if OPT_INLINE_METHOD_CACHE
2220  cd->cc = cc;
2221 
2222  const struct rb_callcache *empty_cc = &vm_empty_cc;
2223  if (cd_owner && cc != empty_cc) {
2224  RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
2225  }
2226 
2227 #if USE_DEBUG_COUNTER
2228  if (!old_cc || old_cc == empty_cc) {
2229  // empty
2230  RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
2231  }
2232  else if (old_cc == cc) {
2233  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
2234  }
2235  else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
2236  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
2237  }
2238  else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
2239  vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
2240  RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
2241  }
2242  else {
2243  RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
2244  }
2245 #endif
2246 #endif // OPT_INLINE_METHOD_CACHE
2247 
2248  VM_ASSERT(vm_cc_cme(cc) == NULL ||
2249  vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
2250 
2251  return cc;
2252 }
2253 
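/* Editor's note: method lookup is tiered. The per-call-site inline
 * cache (cd->cc) is validated first by receiver class and cme validity;
 * a miss falls back to the per-class cc table (vm_search_cc, taken
 * under the VM lock), and ultimately to rb_callable_method_entry(). */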
2254 ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
2255 static const struct rb_callcache *
2256 vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
2257 {
2258  const struct rb_callcache *cc = cd->cc;
2259 
2260 #if OPT_INLINE_METHOD_CACHE
2261  if (LIKELY(vm_cc_class_check(cc, klass))) {
2262  if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
2263  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
2264  RB_DEBUG_COUNTER_INC(mc_inline_hit);
2265  VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
2266  (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
2267  vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
2268 
2269  return cc;
2270  }
2271  RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
2272  }
2273  else {
2274  RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
2275  }
2276 #endif
2277 
2278  return vm_search_method_slowpath0(cd_owner, cd, klass);
2279 }
2280 
2281 static const struct rb_callcache *
2282 vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
2283 {
2284  VALUE klass = CLASS_OF(recv);
2285  VM_ASSERT(klass != Qfalse);
2286  VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
2287 
2288  return vm_search_method_fastpath(cd_owner, cd, klass);
2289 }
2290 
2291 #if __has_attribute(transparent_union)
2292 typedef union {
2293  VALUE (*anyargs)(ANYARGS);
2294  VALUE (*f00)(VALUE);
2295  VALUE (*f01)(VALUE, VALUE);
2296  VALUE (*f02)(VALUE, VALUE, VALUE);
2297  VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
2298  VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
2299  VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2300  VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2301  VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2302  VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2303  VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2304  VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2305  VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2306  VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2307  VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2308  VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2309  VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
2310  VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
2311 } __attribute__((__transparent_union__)) cfunc_type;
2312 # define make_cfunc_type(f) (cfunc_type){.anyargs = (VALUE (*)(ANYARGS))(f)}
2313 #else
2314 typedef VALUE (*cfunc_type)(ANYARGS);
2315 # define make_cfunc_type(f) (cfunc_type)(f)
2316 #endif
2317 
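/* Editor's note: on compilers with transparent_union support, cfunc_type
 * lets check_cfunc()/vm_method_cfunc_is() accept any of the fixed-arity
 * C function pointers above (e.g. rb_obj_equal) without a cast at the
 * call site, while still comparing the raw function pointer. */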
2318 static inline int
2319 check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
2320 {
2321  if (! me) {
2322  return false;
2323  }
2324  else {
2325  VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
2326  VM_ASSERT(callable_method_entry_p(me));
2327  VM_ASSERT(me->def);
2328  if (me->def->type != VM_METHOD_TYPE_CFUNC) {
2329  return false;
2330  }
2331  else {
2332 #if __has_attribute(transparent_union)
2333  return me->def->body.cfunc.func == func.anyargs;
2334 #else
2335  return me->def->body.cfunc.func == func;
2336 #endif
2337  }
2338  }
2339 }
2340 
2341 static inline int
2342 vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
2343 {
2344  VM_ASSERT(iseq != NULL);
2345  const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2346  return check_cfunc(vm_cc_cme(cc), func);
2347 }
2348 
2349 #define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
2350 #define vm_method_cfunc_is(iseq, cd, recv, func) vm_method_cfunc_is(iseq, cd, recv, make_cfunc_type(func))
2351 
2352 #define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2353 
2354 static inline bool
2355 FIXNUM_2_P(VALUE a, VALUE b)
2356 {
2357  /* FIXNUM_P(a) && FIXNUM_P(b)
2358  * == ((a & 1) && (b & 1))
2359  * == a & b & 1 */
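    /* Worked example (editor's note): Fixnum 1 is encoded as the VALUE
     * 0b011 (3) and Fixnum 2 as 0b101 (5); 3 & 5 & 1 == 1. Heap objects
     * are aligned pointers with the low bit clear, so the conjunction
     * is 0 unless both operands are Fixnums. */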
2360  SIGNED_VALUE x = a;
2361  SIGNED_VALUE y = b;
2362  SIGNED_VALUE z = x & y & 1;
2363  return z == 1;
2364 }
2365 
2366 static inline bool
2367 FLONUM_2_P(VALUE a, VALUE b)
2368 {
2369 #if USE_FLONUM
2370  /* FLONUM_P(a) && FLONUM_P(b)
2371  * == ((a & 3) == 2) && ((b & 3) == 2)
2372  * == !(((a ^ 2) | (b ^ 2)) & 3)
2373  */
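    /* Editor's note: a flonum has low bits 0b10, so v ^ 2 clears those
     * bits for flonums only; OR-ing both values and masking with 3 is
     * therefore 0 iff both operands are flonums. */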
2374  SIGNED_VALUE x = a;
2375  SIGNED_VALUE y = b;
2376  SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2377  return !z;
2378 #else
2379  return false;
2380 #endif
2381 }
2382 
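/* Editor's sketch: fast paths for `==` on core types, valid only while
 * the corresponding operator is unredefined, e.g.:
 *
 *   1 == 1      # Fixnums: compare by identity
 *   :a == :a    # static Symbols: compare by identity
 *   1.0 == 1.0  # Floats/flonums: numeric compare
 *   "a" == "a"  # Strings: rb_str_eql_internal
 *
 * Returning Qundef means "no fast path here; dispatch a real method
 * call".
 */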
2383 static VALUE
2384 opt_equality_specialized(VALUE recv, VALUE obj)
2385 {
2386  if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2387  goto compare_by_identity;
2388  }
2389  else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2390  goto compare_by_identity;
2391  }
2392  else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2393  goto compare_by_identity;
2394  }
2395  else if (SPECIAL_CONST_P(recv)) {
2396  // other special constants: no fast path; fall through to Qundef
2397  }
2398  else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2399  double a = RFLOAT_VALUE(recv);
2400  double b = RFLOAT_VALUE(obj);
2401 
2402 #if MSC_VERSION_BEFORE(1300)
2403  if (isnan(a)) {
2404  return Qfalse;
2405  }
2406  else if (isnan(b)) {
2407  return Qfalse;
2408  }
2409  else
2410 #endif
2411  return RBOOL(a == b);
2412  }
2413  else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2414  if (recv == obj) {
2415  return Qtrue;
2416  }
2417  else if (RB_TYPE_P(obj, T_STRING)) {
2418  return rb_str_eql_internal(obj, recv);
2419  }
2420  }
2421  return Qundef;
2422 
2423  compare_by_identity:
2424  return RBOOL(recv == obj);
2425 }
2426 
2427 static VALUE
2428 opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2429 {
2430  VM_ASSERT(cd_owner != NULL);
2431 
2432  VALUE val = opt_equality_specialized(recv, obj);
2433  if (!UNDEF_P(val)) return val;
2434 
2435  if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2436  return Qundef;
2437  }
2438  else {
2439  return RBOOL(recv == obj);
2440  }
2441 }
2442 
2443 #undef EQ_UNREDEFINED_P
2444 
2445 static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
2446 NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2447 
2448 static VALUE
2449 opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2450 {
2451  const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
2452 
2453  if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2454  return RBOOL(recv == obj);
2455  }
2456  else {
2457  return Qundef;
2458  }
2459 }
2460 
2461 static VALUE
2462 opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2463 {
2464  VALUE val = opt_equality_specialized(recv, obj);
2465  if (!UNDEF_P(val)) {
2466  return val;
2467  }
2468  else {
2469  return opt_equality_by_mid_slowpath(recv, obj, mid);
2470  }
2471 }
2472 
2473 VALUE
2474 rb_equal_opt(VALUE obj1, VALUE obj2)
2475 {
2476  return opt_equality_by_mid(obj1, obj2, idEq);
2477 }
2478 
2479 VALUE
2480 rb_eql_opt(VALUE obj1, VALUE obj2)
2481 {
2482  return opt_equality_by_mid(obj1, obj2, idEqlP);
2483 }
2484 
2485 extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2486 extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2487 
2488 static VALUE
2489 check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2490 {
2491  switch (type) {
2492  case VM_CHECKMATCH_TYPE_WHEN:
2493  return pattern;
2494  case VM_CHECKMATCH_TYPE_RESCUE:
2495  if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2496  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2497  }
2498  /* fall through */
2499  case VM_CHECKMATCH_TYPE_CASE: {
2500  return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2501  }
2502  default:
2503  rb_bug("check_match: unreachable");
2504  }
2505 }
2506 
2507 
2508 #if MSC_VERSION_BEFORE(1300)
2509 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2510 #else
2511 #define CHECK_CMP_NAN(a, b) /* do nothing */
2512 #endif
2513 
2514 static inline VALUE
2515 double_cmp_lt(double a, double b)
2516 {
2517  CHECK_CMP_NAN(a, b);
2518  return RBOOL(a < b);
2519 }
2520 
2521 static inline VALUE
2522 double_cmp_le(double a, double b)
2523 {
2524  CHECK_CMP_NAN(a, b);
2525  return RBOOL(a <= b);
2526 }
2527 
2528 static inline VALUE
2529 double_cmp_gt(double a, double b)
2530 {
2531  CHECK_CMP_NAN(a, b);
2532  return RBOOL(a > b);
2533 }
2534 
2535 static inline VALUE
2536 double_cmp_ge(double a, double b)
2537 {
2538  CHECK_CMP_NAN(a, b);
2539  return RBOOL(a >= b);
2540 }
2541 
2542 // This logic is duplicated in vm_dump.c.
2543 static inline VALUE *
2544 vm_base_ptr(const rb_control_frame_t *cfp)
2545 {
2546  const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2547 
2548  if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2549  VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
2550 
2551  if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
2552  int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
2553  int params = ISEQ_BODY(cfp->iseq)->param.size;
2554 
2555  CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
2556  bp += vm_ci_argc(ci);
2557  }
2558 
2559  if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
2560  /* adjust `self' */
2561  bp += 1;
2562  }
2563 #if VM_DEBUG_BP_CHECK
2564  if (bp != cfp->bp_check) {
2565  ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2566  (long)(cfp->bp_check - GET_EC()->vm_stack),
2567  (long)(bp - GET_EC()->vm_stack));
2568  rb_bug("vm_base_ptr: unreachable");
2569  }
2570 #endif
2571  return bp;
2572  }
2573  else {
2574  return NULL;
2575  }
2576 }
2577 
2578 VALUE *
2579 rb_vm_base_ptr(const rb_control_frame_t *cfp)
2580 {
2581  return vm_base_ptr(cfp);
2582 }
2583 
2584 /* method call processes with call_info */
2585 
2586 #include "vm_args.c"
2587 
2588 static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2589 ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2590 static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2591 static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2592 static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2593 static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2594 static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2595 
2596 static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2597 
2598 static VALUE
2599 vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2600 {
2601  RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2602 
2603  return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2604 }
2605 
2606 static VALUE
2607 vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2608 {
2609  RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2610 
2611  const struct rb_callcache *cc = calling->cc;
2612  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2613  int param = ISEQ_BODY(iseq)->param.size;
2614  int local = ISEQ_BODY(iseq)->local_table_size;
2615  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2616 }
2617 
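/* Editor's note: a "simple" iseq takes only required positional
 * parameters, e.g. `def m(a, b)`: no optional, rest, post, keyword,
 * kwrest, block, or `...` parameters. */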
2618 bool
2619 rb_simple_iseq_p(const rb_iseq_t *iseq)
2620 {
2621  return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2622  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2623  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2624  ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2625  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2626  ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2627  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2628  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2629 }
2630 
2631 bool
2632 rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2633 {
2634  return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
2635  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2636  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2637  ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
2638  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2639  ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
2640  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2641  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2642 }
2643 
2644 bool
2645 rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2646 {
2647  return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
2648  ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
2649  ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
2650  ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
2651  ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
2652  ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
2653  ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
2654 }
2655 
2656 #define ALLOW_HEAP_ARGV (-2)
2657 #define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
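/* Editor's note on max_args for vm_caller_setup_arg_splat:
 *   >= 0                          copy at most max_args elements
 *                                 (non-lambda block semantics)
 *   ALLOW_HEAP_ARGV               a large splat may spill into a hidden
 *                                 Array (calling->heap_argv)
 *   ALLOW_HEAP_ARGV_KEEP_KWSPLAT  as above, but kw_splat stays set when
 *                                 the kw hash is appended to heap_argv */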
2658 
2659 static inline bool
2660 vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
2661 {
2662  vm_check_canary(GET_EC(), cfp->sp);
2663  bool ret = false;
2664 
2665  if (!NIL_P(ary)) {
2666  const VALUE *ptr = RARRAY_CONST_PTR(ary);
2667  long len = RARRAY_LEN(ary);
2668  int argc = calling->argc;
2669 
2670  if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
2671  /* Avoid SystemStackError when splatting large arrays by storing arguments in
2672  * a temporary array, instead of trying to keep the arguments on the VM stack.
2673  */
2674  VALUE *argv = cfp->sp - argc;
2675  VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
2676  rb_ary_cat(argv_ary, argv, argc);
2677  rb_ary_cat(argv_ary, ptr, len);
2678  cfp->sp -= argc - 1;
2679  cfp->sp[-1] = argv_ary;
2680  calling->argc = 1;
2681  calling->heap_argv = argv_ary;
2682  RB_GC_GUARD(ary);
2683  }
2684  else {
2685  long i;
2686 
2687  if (max_args >= 0 && len + argc > max_args) {
2688  /* If only a given max_args is allowed, copy up to max args.
2689  * Used by vm_callee_setup_block_arg for non-lambda blocks,
2690  * where additional arguments are ignored.
2691  *
2692  * Also, copy up to one more argument than the maximum,
2693  * in case it is an empty keyword hash that will be removed.
2694  */
2695  calling->argc += len - (max_args - argc + 1);
2696  len = max_args - argc + 1;
2697  ret = true;
2698  }
2699  else {
2700  /* Unset heap_argv if it was set originally. This can happen when
2701  * forwarding modified arguments, where heap_argv was used
2702  * originally but is not supported by the forwarded method in
2703  * all cases.
2704  */
2705  calling->heap_argv = 0;
2706  }
2707  CHECK_VM_STACK_OVERFLOW(cfp, len);
2708 
2709  for (i = 0; i < len; i++) {
2710  *cfp->sp++ = ptr[i];
2711  }
2712  calling->argc += i;
2713  }
2714  }
2715 
2716  return ret;
2717 }
2718 
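/* Editor's sketch: folds VM_CALL_KWARG-style arguments into one Hash on
 * the stack. For f(k1: 1, k2: 2) the top of stack [.., 1, 2] with
 * keywords (:k1, :k2) becomes [.., {k1: 1, k2: 2}], so argc shrinks by
 * kw_len - 1 and kw_splat is set. */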
2719 static inline void
2720 vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
2721 {
2722  const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
2723  const int kw_len = vm_ci_kwarg(ci)->keyword_len;
2724  const VALUE h = rb_hash_new_with_size(kw_len);
2725  VALUE *sp = cfp->sp;
2726  int i;
2727 
2728  for (i=0; i<kw_len; i++) {
2729  rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
2730  }
2731  (sp-kw_len)[0] = h;
2732 
2733  cfp->sp -= kw_len - 1;
2734  calling->argc -= kw_len - 1;
2735  calling->kw_splat = 1;
2736 }
2737 
2738 static inline VALUE
2739 vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
2740 {
2741  if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
2742  if (keyword_hash != Qnil) {
2743  /* Convert a non-hash keyword splat to a new hash */
2744  keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
2745  }
2746  }
2747  else if (!IS_ARGS_KW_SPLAT_MUT(ci) && !RHASH_EMPTY_P(keyword_hash)) {
2748  /* Convert a hash keyword splat to a new hash unless
2749  * a mutable keyword splat was passed.
2750  * Skip allocating new hash for empty keyword splat, as empty
2751  * keyword splat will be ignored by both callers.
2752  */
2753  keyword_hash = rb_hash_dup(keyword_hash);
2754  }
2755  return keyword_hash;
2756 }
2757 
2758 static inline void
2759 CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2760  struct rb_calling_info *restrict calling,
2761  const struct rb_callinfo *restrict ci, int max_args)
2762 {
2763  if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2764  if (IS_ARGS_KW_SPLAT(ci)) {
2765  // f(*a, **kw)
2766  VM_ASSERT(calling->kw_splat == 1);
2767 
2768  cfp->sp -= 2;
2769  calling->argc -= 2;
2770  VALUE ary = cfp->sp[0];
2771  VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
2772 
2773  // splat a
2774  if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
2775 
2776  // put kw
2777  if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
2778  if (UNLIKELY(calling->heap_argv)) {
2779  rb_ary_push(calling->heap_argv, kwh);
2780  ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
2781  if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
2782  calling->kw_splat = 0;
2783  }
2784  }
2785  else {
2786  cfp->sp[0] = kwh;
2787  cfp->sp++;
2788  calling->argc++;
2789 
2790  VM_ASSERT(calling->kw_splat == 1);
2791  }
2792  }
2793  else {
2794  calling->kw_splat = 0;
2795  }
2796  }
2797  else {
2798  // f(*a)
2799  VM_ASSERT(calling->kw_splat == 0);
2800 
2801  cfp->sp -= 1;
2802  calling->argc -= 1;
2803  VALUE ary = cfp->sp[0];
2804 
2805  if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
2806  goto check_keyword;
2807  }
2808 
2809  // check the last argument
2810  VALUE last_hash, argv_ary;
2811  if (UNLIKELY(argv_ary = calling->heap_argv)) {
2812  if (!IS_ARGS_KEYWORD(ci) &&
2813  RARRAY_LEN(argv_ary) > 0 &&
2814  RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
2815  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2816 
2817  rb_ary_pop(argv_ary);
2818  if (!RHASH_EMPTY_P(last_hash)) {
2819  rb_ary_push(argv_ary, rb_hash_dup(last_hash));
2820  calling->kw_splat = 1;
2821  }
2822  }
2823  }
2824  else {
2825 check_keyword:
2826  if (!IS_ARGS_KEYWORD(ci) &&
2827  calling->argc > 0 &&
2828  RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
2829  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2830 
2831  if (RHASH_EMPTY_P(last_hash)) {
2832  calling->argc--;
2833  cfp->sp -= 1;
2834  }
2835  else {
2836  cfp->sp[-1] = rb_hash_dup(last_hash);
2837  calling->kw_splat = 1;
2838  }
2839  }
2840  }
2841  }
2842  }
2843  else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
2844  // f(**kw)
2845  VM_ASSERT(calling->kw_splat == 1);
2846  VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
2847 
2848  if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
2849  cfp->sp--;
2850  calling->argc--;
2851  calling->kw_splat = 0;
2852  }
2853  else {
2854  cfp->sp[-1] = kwh;
2855  }
2856  }
2857  else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
2858  // f(k1:1, k2:2)
2859  VM_ASSERT(calling->kw_splat == 0);
2860 
2861  /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2862  * by creating a keyword hash.
2863  * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2864  */
2865  vm_caller_setup_arg_kw(cfp, calling, ci);
2866  }
2867 }
2868 
2869 #define USE_OPT_HIST 0
2870 
2871 #if USE_OPT_HIST
2872 #define OPT_HIST_MAX 64
2873 static int opt_hist[OPT_HIST_MAX+1];
2874 
2875 __attribute__((destructor))
2876 static void
2877 opt_hist_show_results_at_exit(void)
2878 {
2879  for (int i=0; i<OPT_HIST_MAX; i++) {
2880  ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2881  }
2882 }
2883 #endif
2884 
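/* Editor's note: opt_table maps "number of optional args supplied" to a
 * starting PC. For `def m(a, b = 1, c = 2)` (lead_num 1, opt_num 2),
 * m(10) has opt == 0 and starts at opt_table[0], which evaluates the
 * defaults of b and c; m(10, 20, 30) has opt == 2 and opt_table[2]
 * skips both default expressions. */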
2885 static VALUE
2886 vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2887  struct rb_calling_info *calling)
2888 {
2889  const struct rb_callcache *cc = calling->cc;
2890  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2891  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2892  const int opt = calling->argc - lead_num;
2893  const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
2894  const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2895  const int param = ISEQ_BODY(iseq)->param.size;
2896  const int local = ISEQ_BODY(iseq)->local_table_size;
2897  const int delta = opt_num - opt;
2898 
2899  RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2900 
2901 #if USE_OPT_HIST
2902  if (opt_pc < OPT_HIST_MAX) {
2903  opt_hist[opt]++;
2904  }
2905  else {
2906  opt_hist[OPT_HIST_MAX]++;
2907  }
2908 #endif
2909 
2910  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2911 }
2912 
2913 static VALUE
2914 vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2915  struct rb_calling_info *calling)
2916 {
2917  const struct rb_callcache *cc = calling->cc;
2918  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2919  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2920  const int opt = calling->argc - lead_num;
2921  const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
2922 
2923  RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2924 
2925 #if USE_OPT_HIST
2926  if (opt_pc < OPT_HIST_MAX) {
2927  opt_hist[opt]++;
2928  }
2929  else {
2930  opt_hist[OPT_HIST_MAX]++;
2931  }
2932 #endif
2933 
2934  return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2935 }
2936 
2937 static void
2938 args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2939  VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2940  VALUE *const locals);
2941 
2942 static VALUE
2943 vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2944  struct rb_calling_info *calling)
2945 {
2946  const struct rb_callcache *cc = calling->cc;
2947  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2948  int param_size = ISEQ_BODY(iseq)->param.size;
2949  int local_size = ISEQ_BODY(iseq)->local_table_size;
2950 
2951  // Setting up local size and param size
2952  VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
2953 
2954  local_size = local_size + vm_ci_argc(calling->cd->ci);
2955  param_size = param_size + vm_ci_argc(calling->cd->ci);
2956 
2957  cfp->sp[0] = (VALUE)calling->cd->ci;
2958 
2959  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
2960 }
2961 
2962 static VALUE
2963 vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2964  struct rb_calling_info *calling)
2965 {
2966  const struct rb_callinfo *ci = calling->cd->ci;
2967  const struct rb_callcache *cc = calling->cc;
2968 
2969  VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2970  RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2971 
2972  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2973  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
2974  const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2975  const int ci_kw_len = kw_arg->keyword_len;
2976  const VALUE * const ci_keywords = kw_arg->keywords;
2977  VALUE *argv = cfp->sp - calling->argc;
2978  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2979  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
2980  VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2981  MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2982  args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2983 
2984  int param = ISEQ_BODY(iseq)->param.size;
2985  int local = ISEQ_BODY(iseq)->local_table_size;
2986  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2987 }
2988 
2989 static VALUE
2990 vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2991  struct rb_calling_info *calling)
2992 {
2993  const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
2994  const struct rb_callcache *cc = calling->cc;
2995 
2996  VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2997  RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2998 
2999  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3000  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3001  VALUE * const argv = cfp->sp - calling->argc;
3002  VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
3003 
3004  int i;
3005  for (i=0; i<kw_param->num; i++) {
3006  klocals[i] = kw_param->default_values[i];
3007  }
3008  klocals[i] = INT2FIX(0); // kw specify flag
3009  // NOTE:
3010  // nobody checks this value, but it should be cleared because it can
3011  // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
3012 
3013  int param = ISEQ_BODY(iseq)->param.size;
3014  int local = ISEQ_BODY(iseq)->local_table_size;
3015  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
3016 }
3017 
3018 static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
3019 
3020 static VALUE
3021 vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
3022  struct rb_calling_info *calling)
3023 {
3024  const struct rb_builtin_function *bf = calling->cc->aux_.bf;
3025  cfp->sp -= (calling->argc + 1);
3026  rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
3027  return builtin_invoker0(ec, calling->recv, NULL, func_ptr);
3028 }
3029 
3030 VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
3031 
3032 static void
3033 warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
3034 {
3035  rb_vm_t *vm = GET_VM();
3036  st_table *dup_check_table = vm->unused_block_warning_table;
3037  st_data_t key;
3038 
3039  union {
3040  VALUE v;
3041  unsigned char b[SIZEOF_VALUE];
3042  } k1 = {
3043  .v = (VALUE)pc,
3044  }, k2 = {
3045  .v = (VALUE)cme->def,
3046  };
3047 
3048  // relaxed check: key on the method id only
3049  if (!vm->unused_block_warning_strict) {
3050  key = (st_data_t)cme->def->original_id;
3051 
3052  if (st_lookup(dup_check_table, key, NULL)) {
3053  return;
3054  }
3055  }
3056 
3057  // strict check
3058  // make unique key from pc and me->def pointer
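    // Editor's note: the loop below XOR-folds the bytes of the PC with
    // the byte-reversed method definition pointer, so both values
    // contribute to every byte of the st_data_t key.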
3059  key = 0;
3060  for (int i=0; i<SIZEOF_VALUE; i++) {
3061  // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
3062  key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
3063  }
3064 
3065  if (0) {
3066  fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
3067  fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
3068  fprintf(stderr, "key:%p\n", (void *)key);
3069  }
3070 
3071  // duplication check
3072  if (st_insert(dup_check_table, key, 1)) {
3073  // already shown
3074  }
3075  else {
3076  VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
3077  VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
3078 
3079  if (!NIL_P(m_loc)) {
3080  rb_warning("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
3081  name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
3082  }
3083  else {
3084  rb_warning("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
3085  }
3086  }
3087 }
3088 
3089 static inline int
3090 vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
3091  const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
3092 {
3093  const struct rb_callinfo *ci = calling->cd->ci;
3094  const struct rb_callcache *cc = calling->cc;
3095 
3096  VM_ASSERT((vm_ci_argc(ci), 1));
3097  VM_ASSERT(vm_cc_cme(cc) != NULL);
3098 
3099  if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
3100  calling->block_handler != VM_BLOCK_HANDLER_NONE &&
3101  !(vm_ci_flag(calling->cd->ci) & (VM_CALL_OPT_SEND | VM_CALL_SUPER)))) {
3102  warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
3103  }
3104 
3105  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
3106  if (LIKELY(rb_simple_iseq_p(iseq))) {
3107  rb_control_frame_t *cfp = ec->cfp;
3108  int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3109  CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
3110 
3111  if (calling->argc != lead_num) {
3112  argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
3113  }
3114 
3115  //VM_ASSERT(ci == calling->cd->ci);
3116  VM_ASSERT(cc == calling->cc);
3117 
3118  if (vm_call_iseq_optimizable_p(ci, cc)) {
3119  if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
3120  !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
3121  VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
3122  vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
3123  CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
3124  }
3125  else {
3126  CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
3127  }
3128  }
3129  return 0;
3130  }
3131  else if (rb_iseq_only_optparam_p(iseq)) {
3132  rb_control_frame_t *cfp = ec->cfp;
3133 
3134  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3135  const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
3136 
3137  CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
3138  const int argc = calling->argc;
3139  const int opt = argc - lead_num;
3140 
3141  if (opt < 0 || opt > opt_num) {
3142  argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
3143  }
3144 
3145  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3146  CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
3147  !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3148  vm_call_cacheable(ci, cc));
3149  }
3150  else {
3151  CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
3152  !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
3153  vm_call_cacheable(ci, cc));
3154  }
3155 
3156  /* initialize opt vars for self-references */
3157  VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
3158  for (int i=argc; i<lead_num + opt_num; i++) {
3159  argv[i] = Qnil;
3160  }
3161  return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
3162  }
3163  else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
3164  const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
3165  const int argc = calling->argc;
3166  const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
3167 
3168  if (vm_ci_flag(ci) & VM_CALL_KWARG) {
3169  const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
3170 
3171  if (argc - kw_arg->keyword_len == lead_num) {
3172  const int ci_kw_len = kw_arg->keyword_len;
3173  const VALUE * const ci_keywords = kw_arg->keywords;
3174  VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
3175  MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
3176 
3177  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3178  args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
3179 
3180  CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
3181  vm_call_cacheable(ci, cc));
3182 
3183  return 0;
3184  }
3185  }
3186  else if (argc == lead_num) {
3187  /* no kwarg */
3188  VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
3189  args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
3190 
3191  if (klocals[kw_param->num] == INT2FIX(0)) {
3192  /* copy from default_values */
3193  CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
3194  vm_call_cacheable(ci, cc));
3195  }
3196 
3197  return 0;
3198  }
3199  }
3200  }
3201 
3202  // Called iseq is using ... param
3203  // def foo(...) # <- iseq for foo will have "forwardable"
3204  //
3205  // We want to set the `...` local to the caller's CI
3206  // foo(1, 2) # <- the ci for this should end up as `...`
3207  //
3208  // So hopefully the stack looks like:
3209  //
3210  // => 1
3211  // => 2
3212  // => *
3213  // => **
3214  // => &
3215  // => ... # <- points at `foo`s CI
3216  // => cref_or_me
3217  // => specval
3218  // => type
3219  //
3220  if (ISEQ_BODY(iseq)->param.flags.forwardable) {
3221  bool can_fastpath = true;
3222 
3223  if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3224  struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
3225  if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
3226  ci = vm_ci_new_runtime(
3227  vm_ci_mid(ci),
3228  vm_ci_flag(ci),
3229  vm_ci_argc(ci),
3230  vm_ci_kwarg(ci));
3231  } else {
3232  ci = forward_cd->caller_ci;
3233  }
3234  can_fastpath = false;
3235  }
3236  // C functions calling iseqs will stack allocate a CI,
3237  // so we need to convert it to heap allocated
3238  if (!vm_ci_markable(ci)) {
3239  ci = vm_ci_new_runtime(
3240  vm_ci_mid(ci),
3241  vm_ci_flag(ci),
3242  vm_ci_argc(ci),
3243  vm_ci_kwarg(ci));
3244  can_fastpath = false;
3245  }
3246  argv[param_size - 1] = (VALUE)ci;
3247  CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
3248  return 0;
3249  }
3250 
3251  return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
3252 }
3253 
3254 static void
3255 vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
3256 {
3257  // This case is when the caller is using a ... parameter.
3258  // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
3259  // In this case the caller's caller's CI will be on the stack.
3260  //
3261  // For example:
3262  //
3263  // def bar(a, b); a + b; end
3264  // def foo(...); bar(...); end
3265  // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
3266  //
3267  // Stack layout will be:
3268  //
3269  // > 1
3270  // > 2
3271  // > CI for foo(1, 2)
3272  // > cref_or_me
3273  // > specval
3274  // > type
3275  // > receiver
3276  // > CI for foo(1, 2), via `getlocal ...`
3277  // > ( SP points here )
3278  const VALUE * lep = VM_CF_LEP(cfp);
3279 
3280  const rb_iseq_t *iseq;
3281 
3282  // If we're in an escaped environment (lambda for example), get the iseq
3283  // from the captured env.
3284  if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
3285  rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
3286  iseq = env->iseq;
3287  }
3288  else { // Otherwise use the lep to find the caller
3289  iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
3290  }
3291 
3292  // Our local storage is below the args we need to copy
3293  int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
3294 
3295  const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
3296  VALUE * to = cfp->sp - 1; // clobber the CI
3297 
3298  if (RTEST(splat)) {
3299  to -= 1; // clobber the splat array
3300  CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
3301  MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
3302  to += RARRAY_LEN(splat);
3303  }
3304 
3305  CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
3306  MEMCPY(to, from, VALUE, argc);
3307  cfp->sp = to + argc;
3308 
3309  // Stack layout should now be:
3310  //
3311  // > 1
3312  // > 2
3313  // > CI for foo(1, 2)
3314  // > cref_or_me
3315  // > specval
3316  // > type
3317  // > receiver
3318  // > 1
3319  // > 2
3320  // > ( SP points here )
3321 }
3322 
3323 static VALUE
3324 vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3325 {
3326  RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3327 
3328  const struct rb_callcache *cc = calling->cc;
3329  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3330  int param_size = ISEQ_BODY(iseq)->param.size;
3331  int local_size = ISEQ_BODY(iseq)->local_table_size;
3332 
3333  RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
3334 
3335  const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3336  return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3337 }
3338 
3339 static VALUE
3340 vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3341 {
3342  RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
3343 
3344  const struct rb_callcache *cc = calling->cc;
3345  const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
3346  int param_size = ISEQ_BODY(iseq)->param.size;
3347  int local_size = ISEQ_BODY(iseq)->local_table_size;
3348 
3349  RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
3350 
3351  // Setting up local size and param size
3352  local_size = local_size + vm_ci_argc(calling->cd->ci);
3353  param_size = param_size + vm_ci_argc(calling->cd->ci);
3354 
3355  const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
3356  return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
3357 }
3358 
3359 static inline VALUE
3360 vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3361  int opt_pc, int param_size, int local_size)
3362 {
3363  const struct rb_callinfo *ci = calling->cd->ci;
3364  const struct rb_callcache *cc = calling->cc;
3365 
3366  if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
3367  return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
3368  }
3369  else {
3370  return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
3371  }
3372 }
3373 
3374 static inline VALUE
3375 vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
3376  int opt_pc, int param_size, int local_size)
3377 {
3378  const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3379  VALUE *argv = cfp->sp - calling->argc;
3380  VALUE *sp = argv + param_size;
3381  cfp->sp = argv - 1 /* recv */;
3382 
3383  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
3384  calling->block_handler, (VALUE)me,
3385  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3386  local_size - param_size,
3387  ISEQ_BODY(iseq)->stack_max);
3388  return Qundef;
3389 }
3390 
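/* Editor's note: for a tailcall the caller's frame is popped before the
 * callee's frame is pushed, so the callee reuses the caller's stack
 * region; self and the arguments are copied down to the reclaimed base
 * (sp_orig) first. */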
3391 static inline VALUE
3392 vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
3393 {
3394  const struct rb_callcache *cc = calling->cc;
3395  unsigned int i;
3396  VALUE *argv = cfp->sp - calling->argc;
3397  const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3398  const rb_iseq_t *iseq = def_iseq_ptr(me->def);
3399  VALUE *src_argv = argv;
3400  VALUE *sp_orig, *sp;
3401  VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
3402 
3403  if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
3404  struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
3405  const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
3406  dst_captured->code.val = src_captured->code.val;
3407  if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
3408  calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
3409  }
3410  else {
3411  calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
3412  }
3413  }
3414 
3415  vm_pop_frame(ec, cfp, cfp->ep);
3416  cfp = ec->cfp;
3417 
3418  sp_orig = sp = cfp->sp;
3419 
3420  /* push self */
3421  sp[0] = calling->recv;
3422  sp++;
3423 
3424  /* copy arguments */
3425  for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
3426  *sp++ = src_argv[i];
3427  }
3428 
3429  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
3430  calling->recv, calling->block_handler, (VALUE)me,
3431  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
3432  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
3433  ISEQ_BODY(iseq)->stack_max);
3434 
3435  cfp->sp = sp_orig;
3436 
3437  return Qundef;
3438 }
3439 
3440 static void
3441 ractor_unsafe_check(void)
3442 {
3443  if (!rb_ractor_main_p()) {
3444  rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
3445  }
3446 }
3447 
3448 static VALUE
3449 call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3450 {
3451  ractor_unsafe_check();
3452  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3453  return (*f)(recv, rb_ary_new4(argc, argv));
3454 }
3455 
3456 static VALUE
3457 call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3458 {
3459  ractor_unsafe_check();
3460  VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3461  return (*f)(argc, argv, recv);
3462 }
3463 
3464 static VALUE
3465 call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3466 {
3467  ractor_unsafe_check();
3468  VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3469  return (*f)(recv);
3470 }
3471 
3472 static VALUE
3473 call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3474 {
3475  ractor_unsafe_check();
3476  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3477  return (*f)(recv, argv[0]);
3478 }
3479 
3480 static VALUE
3481 call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3482 {
3483  ractor_unsafe_check();
3484  VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3485  return (*f)(recv, argv[0], argv[1]);
3486 }
3487 
3488 static VALUE
3489 call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3490 {
3491  ractor_unsafe_check();
3492  VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3493  return (*f)(recv, argv[0], argv[1], argv[2]);
3494 }
3495 
3496 static VALUE
3497 call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3498 {
3499  ractor_unsafe_check();
3500  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3501  return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3502 }
3503 
3504 static VALUE
3505 call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3506 {
3507  ractor_unsafe_check();
3508  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3509  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3510 }
3511 
3512 static VALUE
3513 call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3514 {
3515  ractor_unsafe_check();
3516  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3517  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3518 }
3519 
3520 static VALUE
3521 call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3522 {
3523  ractor_unsafe_check();
3524  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3525  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3526 }
3527 
3528 static VALUE
3529 call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3530 {
3531  ractor_unsafe_check();
3532  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3533  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3534 }
3535 
3536 static VALUE
3537 call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3538 {
3539  ractor_unsafe_check();
3540  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3541  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3542 }
3543 
3544 static VALUE
3545 call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3546 {
3547  ractor_unsafe_check();
3548  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3549  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3550 }
3551 
3552 static VALUE
3553 call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3554 {
3555  ractor_unsafe_check();
3556  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3557  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3558 }
3559 
3560 static VALUE
3561 call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3562 {
3563  ractor_unsafe_check();
3564  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3565  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3566 }
3567 
3568 static VALUE
3569 call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3570 {
3571  ractor_unsafe_check();
3572  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3573  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3574 }
3575 
3576 static VALUE
3577 call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3578 {
3579  ractor_unsafe_check();
3580  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3581  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3582 }
3583 
3584 static VALUE
3585 call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3586 {
3587  ractor_unsafe_check();
3588  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3589  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3590 }
3591 
3592 static VALUE
3593 ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3594 {
3595  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3596  return (*f)(recv, rb_ary_new4(argc, argv));
3597 }
3598 
3599 static VALUE
3600 ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3601 {
3602  VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
3603  return (*f)(argc, argv, recv);
3604 }
3605 
3606 static VALUE
3607 ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3608 {
3609  VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
3610  return (*f)(recv);
3611 }
3612 
3613 static VALUE
3614 ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3615 {
3616  VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
3617  return (*f)(recv, argv[0]);
3618 }
3619 
3620 static VALUE
3621 ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3622 {
3623  VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
3624  return (*f)(recv, argv[0], argv[1]);
3625 }
3626 
3627 static VALUE
3628 ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3629 {
3630  VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
3631  return (*f)(recv, argv[0], argv[1], argv[2]);
3632 }
3633 
3634 static VALUE
3635 ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3636 {
3637  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
3638  return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
3639 }
3640 
3641 static VALUE
3642 ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3643 {
3644  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3645  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
3646 }
3647 
3648 static VALUE
3649 ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3650 {
3651  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3652  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
3653 }
3654 
3655 static VALUE
3656 ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3657 {
3658  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3659  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
3660 }
3661 
3662 static VALUE
3663 ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3664 {
3665  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3666  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
3667 }
3668 
3669 static VALUE
3670 ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3671 {
3672  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3673  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
3674 }
3675 
3676 static VALUE
3677 ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3678 {
3679  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3680  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
3681 }
3682 
3683 static VALUE
3684 ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3685 {
3686  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3687  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
3688 }
3689 
3690 static VALUE
3691 ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3692 {
3693  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3694  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
3695 }
3696 
3697 static VALUE
3698 ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3699 {
3700  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3701  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
3702 }
3703 
3704 static VALUE
3705 ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3706 {
3707  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3708  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
3709 }
3710 
3711 static VALUE
3712 ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
3713 {
3714  VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
3715  return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
3716 }
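/* A sketch of how these invokers are selected, assuming a hypothetical
 * pick_invoker() helper (the real arity table lives outside this file):
 *
 *   static VALUE (*pick_invoker(int cfunc_argc))(VALUE, int, const VALUE *, VALUE (*)(ANYARGS))
 *   {
 *       switch (cfunc_argc) {
 *         case -2: return ractor_safe_call_cfunc_m2;  // f(recv, args_ary)
 *         case -1: return ractor_safe_call_cfunc_m1;  // f(argc, argv, recv)
 *         case  0: return ractor_safe_call_cfunc_0;   // f(recv)
 *         case  1: return ractor_safe_call_cfunc_1;   // f(recv, argv[0])
 *         // ... one case per arity, up to ractor_safe_call_cfunc_15
 *         default: rb_bug("unsupported cfunc arity");
 *       }
 *   }
 */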
3717 
3718 static inline int
3719 vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
3720 {
3721  const int ov_flags = RAISED_STACKOVERFLOW;
3722  if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
3723  if (rb_ec_raised_p(ec, ov_flags)) {
3724  rb_ec_raised_reset(ec, ov_flags);
3725  return TRUE;
3726  }
3727  return FALSE;
3728 }
3729 
3730 #define CHECK_CFP_CONSISTENCY(func) \
3731  (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
3732  rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
3733 
3734 static inline
3735 const rb_method_cfunc_t *
3736 vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
3737 {
3738 #if VM_DEBUG_VERIFY_METHOD_CACHE
3739  switch (me->def->type) {
3740  case VM_METHOD_TYPE_CFUNC:
3741  case VM_METHOD_TYPE_NOTIMPLEMENTED:
3742  break;
3743 # define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
3744  METHOD_BUG(ISEQ);
3745  METHOD_BUG(ATTRSET);
3746  METHOD_BUG(IVAR);
3747  METHOD_BUG(BMETHOD);
3748  METHOD_BUG(ZSUPER);
3749  METHOD_BUG(UNDEF);
3750  METHOD_BUG(OPTIMIZED);
3751  METHOD_BUG(MISSING);
3752  METHOD_BUG(REFINED);
3753  METHOD_BUG(ALIAS);
3754 # undef METHOD_BUG
3755  default:
3756  rb_bug("wrong method type: %d", me->def->type);
3757  }
3758 #endif
3759  return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3760 }
3761 
3762 static VALUE
3763 vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3764  int argc, VALUE *argv, VALUE *stack_bottom)
3765 {
3766  RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3767  const struct rb_callinfo *ci = calling->cd->ci;
3768  const struct rb_callcache *cc = calling->cc;
3769  VALUE val;
3770  const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3771  const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3772 
3773  VALUE recv = calling->recv;
3774  VALUE block_handler = calling->block_handler;
3775  VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3776 
3777  if (UNLIKELY(calling->kw_splat)) {
3778  frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3779  }
3780 
3781  VM_ASSERT(reg_cfp == ec->cfp);
3782 
3783  RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3784  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3785 
3786  vm_push_frame(ec, NULL, frame_type, recv,
3787  block_handler, (VALUE)me,
3788  0, ec->cfp->sp, 0, 0);
3789 
3790  int len = cfunc->argc;
3791  if (len >= 0) rb_check_arity(argc, len, len);
3792 
3793  reg_cfp->sp = stack_bottom;
3794  val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
3795 
3796  CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3797 
3798  rb_vm_pop_frame(ec);
3799 
3800  VM_ASSERT(ec->cfp->sp == stack_bottom);
3801 
3802  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3803  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3804 
3805  return val;
3806 }
3807 
3808 // Push a C method frame for a given cme. This is called when JIT code skipped
3809 // pushing a frame but the C method reached a point where a frame is needed.
3810 void
3811 rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
3812 {
3813  VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
3814  rb_execution_context_t *ec = GET_EC();
3815  VALUE *sp = ec->cfp->sp;
3816  VALUE recv = *(sp - recv_idx - 1);
3817  VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3818  VALUE block_handler = VM_BLOCK_HANDLER_NONE;
3819 #if VM_CHECK_MODE > 0
3820  // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
3821  *(GET_EC()->cfp->sp) = Qfalse;
3822 #endif
3823  vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
3824 }
3825 
3826 // If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
3827 bool
3828 rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
3829 {
3830  return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
3831 }
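/* Illustration: `f(*a)` marks its call site with VM_CALL_ARGS_SPLAT, while
 * `f(k: 1)` / `f(**kw)` set the kwarg / kw-splat flags; such sites must run
 * CALLER_SETUP_ARG and therefore cannot take the setup-free fastpath. */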
3832 
3833 static VALUE
3834 vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3835 {
3836  int argc = calling->argc;
3837  VALUE *stack_bottom = reg_cfp->sp - argc - 1;
3838  VALUE *argv = &stack_bottom[1];
3839 
3840  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3841 }
3842 
3843 static VALUE
3844 vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3845 {
3846  const struct rb_callinfo *ci = calling->cd->ci;
3847  RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
3848 
3849  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
3850  VALUE argv_ary;
3851  if (UNLIKELY(argv_ary = calling->heap_argv)) {
3852  VM_ASSERT(!IS_ARGS_KEYWORD(ci));
3853  int argc = RARRAY_LENINT(argv_ary);
3854  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3855  VALUE *stack_bottom = reg_cfp->sp - 2;
3856 
3857  VM_ASSERT(calling->argc == 1);
3858  VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
3859  VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
3860 
3861  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
3862  }
3863  else {
3864  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
3865 
3866  return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3867  }
3868 }
3869 
3870 static inline VALUE
3871 vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
3872 {
3873  VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
3874  int argc = RARRAY_LENINT(argv_ary) - argc_offset;
3875 
3876  if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
3877  return vm_call_cfunc_other(ec, reg_cfp, calling);
3878  }
3879 
3880  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3881  calling->kw_splat = 0;
3882  int i;
3883  VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
3884  VALUE *sp = stack_bottom;
3885  CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
3886  for (i = 0; i < argc; i++) {
3887  *++sp = argv[i];
3888  }
3889  reg_cfp->sp = sp+1;
3890 
3891  return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
3892 }
3893 
3894 static inline VALUE
3895 vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3896 {
3897  RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
3898  VALUE argv_ary = reg_cfp->sp[-1];
3899  int argc = RARRAY_LENINT(argv_ary);
3900  VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
3901  VALUE last_hash;
3902  int argc_offset = 0;
3903 
3904  if (UNLIKELY(argc > 0 &&
3905  RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
3906  (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
3907  if (!RHASH_EMPTY_P(last_hash)) {
3908  return vm_call_cfunc_other(ec, reg_cfp, calling);
3909  }
3910  argc_offset++;
3911  }
3912  return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
3913 }
3914 
3915 static inline VALUE
3916 vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3917 {
3918  RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
3919  VALUE keyword_hash = reg_cfp->sp[-1];
3920 
3921  if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
3922  return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
3923  }
3924 
3925  return vm_call_cfunc_other(ec, reg_cfp, calling);
3926 }
3927 
3928 static VALUE
3929 vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3930 {
3931  const struct rb_callinfo *ci = calling->cd->ci;
3932  RB_DEBUG_COUNTER_INC(ccf_cfunc);
3933 
3934  if (IS_ARGS_SPLAT(ci) && !(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
3935  if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
3936  // f(*a)
3937  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
3938  return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
3939  }
3940  if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
3941  // f(*a, **kw)
3942  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
3943  return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
3944  }
3945  }
3946 
3947  CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
3948  return vm_call_cfunc_other(ec, reg_cfp, calling);
3949 }
3950 
3951 static VALUE
3952 vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3953 {
3954  const struct rb_callcache *cc = calling->cc;
3955  RB_DEBUG_COUNTER_INC(ccf_ivar);
3956  cfp->sp -= 1;
3957  VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
3958  return ivar;
3959 }
3960 
3961 static VALUE
3962 vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
3963 {
3964  RB_DEBUG_COUNTER_INC(ccf_attrset);
3965  VALUE val = *(cfp->sp - 1);
3966  cfp->sp -= 2;
3967  attr_index_t index = vm_cc_attr_index(cc);
3968  shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
3969  ID id = vm_cc_cme(cc)->def->body.attr.id;
3970  rb_check_frozen(obj);
3971  VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
3972  if (UNDEF_P(res)) {
3973  switch (BUILTIN_TYPE(obj)) {
3974  case T_OBJECT:
3975  case T_CLASS:
3976  case T_MODULE:
3977  break;
3978  default:
3979  {
3980  res = vm_setivar_default(obj, id, val, dest_shape_id, index);
3981  if (!UNDEF_P(res)) {
3982  return res;
3983  }
3984  }
3985  }
3986  res = vm_setivar_slowpath_attr(obj, id, val, cc);
3987  }
3988  return res;
3989 }
3990 
3991 static VALUE
3992 vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3993 {
3994  return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
3995 }
3996 
3997 static inline VALUE
3998 vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3999 {
4000  rb_proc_t *proc;
4001  VALUE val;
4002  const struct rb_callcache *cc = calling->cc;
4003  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4004  VALUE procv = cme->def->body.bmethod.proc;
4005 
4006  if (!RB_OBJ_SHAREABLE_P(procv) &&
4007  cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4008  rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4009  }
4010 
4011  /* control block frame */
4012  GetProcPtr(procv, proc);
4013  val = vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
4014 
4015  return val;
4016 }
4017 
4018 static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
4019 
4020 static VALUE
4021 vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4022 {
4023  RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
4024 
4025  const struct rb_callcache *cc = calling->cc;
4026  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4027  VALUE procv = cme->def->body.bmethod.proc;
4028 
4029  if (!RB_OBJ_SHAREABLE_P(procv) &&
4030  cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
4031  rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
4032  }
4033 
4034  rb_proc_t *proc;
4035  GetProcPtr(procv, proc);
4036  const struct rb_block *block = &proc->block;
4037 
4038  while (vm_block_type(block) == block_type_proc) {
4039  block = vm_proc_block(block->as.proc);
4040  }
4041  VM_ASSERT(vm_block_type(block) == block_type_iseq);
4042 
4043  const struct rb_captured_block *captured = &block->as.captured;
4044  const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4045  VALUE * const argv = cfp->sp - calling->argc;
4046  const int arg_size = ISEQ_BODY(iseq)->param.size;
4047 
4048  int opt_pc;
4049  if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
4050  opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
4051  }
4052  else {
4053  opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
4054  }
4055 
4056  cfp->sp = argv - 1; // -1 for the receiver
4057 
4058  vm_push_frame(ec, iseq,
4059  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
4060  calling->recv,
4061  VM_GUARDED_PREV_EP(captured->ep),
4062  (VALUE)cme,
4063  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
4064  argv + arg_size,
4065  ISEQ_BODY(iseq)->local_table_size - arg_size,
4066  ISEQ_BODY(iseq)->stack_max);
4067 
4068  return Qundef;
4069 }
4070 
4071 static VALUE
4072 vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4073 {
4074  RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
4075 
4076  VALUE *argv;
4077  int argc;
4078  CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
4079  if (UNLIKELY(calling->heap_argv)) {
4080  argv = RARRAY_PTR(calling->heap_argv);
4081  cfp->sp -= 2;
4082  }
4083  else {
4084  argc = calling->argc;
4085  argv = ALLOCA_N(VALUE, argc);
4086  MEMCPY(argv, cfp->sp - argc, VALUE, argc);
4087  cfp->sp += - argc - 1;
4088  }
4089 
4090  return vm_call_bmethod_body(ec, calling, argv);
4091 }
4092 
4093 static VALUE
4094 vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4095 {
4096  RB_DEBUG_COUNTER_INC(ccf_bmethod);
4097 
4098  const struct rb_callcache *cc = calling->cc;
4099  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4100  VALUE procv = cme->def->body.bmethod.proc;
4101  rb_proc_t *proc;
4102  GetProcPtr(procv, proc);
4103  const struct rb_block *block = &proc->block;
4104 
4105  while (vm_block_type(block) == block_type_proc) {
4106  block = vm_proc_block(block->as.proc);
4107  }
4108  if (vm_block_type(block) == block_type_iseq) {
4109  CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
4110  return vm_call_iseq_bmethod(ec, cfp, calling);
4111  }
4112 
4113  CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
4114  return vm_call_noniseq_bmethod(ec, cfp, calling);
4115 }
4116 
4117 VALUE
4118 rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
4119 {
4120  VALUE klass = current_class;
4121 
4122  /* for a prepended Module, start from the covering class */
4123  if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
4124  RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
4125  klass = RBASIC_CLASS(klass);
4126  }
4127 
4128  while (RTEST(klass)) {
4129  VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
4130  if (owner == target_owner) {
4131  return klass;
4132  }
4133  klass = RCLASS_SUPER(klass);
4134  }
4135 
4136  return current_class; /* maybe module function */
4137 }
4138 
4139 static const rb_callable_method_entry_t *
4140 aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4141 {
4142  const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
4143  const rb_callable_method_entry_t *cme;
4144 
4145  if (orig_me->defined_class == 0) {
4146  VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
4147  VM_ASSERT_TYPE(orig_me->owner, T_MODULE);
4148  cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
4149 
4150  if (me->def->reference_count == 1) {
4151  RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
4152  }
4153  else {
4154  rb_method_definition_t *def =
4155  rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
4156  rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
4157  }
4158  }
4159  else {
4160  cme = (const rb_callable_method_entry_t *)orig_me;
4161  }
4162 
4163  VM_ASSERT(callable_method_entry_p(cme));
4164  return cme;
4165 }
4166 
4167 const rb_callable_method_entry_t *
4168 rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
4169 {
4170  return aliased_callable_method_entry(me);
4171 }
4172 
4173 static VALUE
4174 vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4175 {
4176  calling->cc = &VM_CC_ON_STACK(Qundef,
4177  vm_call_general,
4178  {{0}},
4179  aliased_callable_method_entry(vm_cc_cme(calling->cc)));
4180 
4181  return vm_call_method_each_type(ec, cfp, calling);
4182 }
4183 
4184 static enum method_missing_reason
4185 ci_missing_reason(const struct rb_callinfo *ci)
4186 {
4187  enum method_missing_reason stat = MISSING_NOENTRY;
4188  if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4189  if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
4190  if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
4191  return stat;
4192 }
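/* Illustration: a bare `foo` parses as a VCALL (it could have been a local
 * variable), `foo(1)` as an FCALL (no explicit receiver), and a `super` site
 * carries VM_CALL_SUPER; these bits let method_missing and NoMethodError
 * report how the failing call was written. */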
4193 
4194 static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
4195 
4196 static VALUE
4197 vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4198  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
4199 {
4200  ASSUME(calling->argc >= 0);
4201 
4202  enum method_missing_reason missing_reason = MISSING_NOENTRY;
4203  int argc = calling->argc;
4204  VALUE recv = calling->recv;
4205  VALUE klass = CLASS_OF(recv);
4206  ID mid = rb_check_id(&symbol);
4207  flags |= VM_CALL_OPT_SEND;
4208 
4209  if (UNLIKELY(! mid)) {
4210  mid = idMethodMissing;
4211  missing_reason = ci_missing_reason(ci);
4212  ec->method_missing_reason = missing_reason;
4213 
4214  VALUE argv_ary;
4215  if (UNLIKELY(argv_ary = calling->heap_argv)) {
4216  if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4217  rb_ary_unshift(argv_ary, symbol);
4218 
4219  /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4220  int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4221  VALUE exc = rb_make_no_method_exception(
4222  rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
4223 
4224  rb_exc_raise(exc);
4225  }
4226  rb_ary_unshift(argv_ary, rb_str_intern(symbol));
4227  }
4228  else {
4229  /* E.g. when argc == 2
4230  *
4231  * | | | | TOPN
4232  * | | +------+
4233  * | | +---> | arg1 | 0
4234  * +------+ | +------+
4235  * | arg1 | -+ +-> | arg0 | 1
4236  * +------+ | +------+
4237  * | arg0 | ---+ | sym | 2
4238  * +------+ +------+
4239  * | recv | | recv | 3
4240  * --+------+--------+------+------
4241  */
4242  int i = argc;
4243  CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4244  INC_SP(1);
4245  MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
4246  argc = ++calling->argc;
4247 
4248  if (rb_method_basic_definition_p(klass, idMethodMissing)) {
4249  /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
4250  TOPN(i) = symbol;
4251  int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
4252  const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
4253  VALUE exc = rb_make_no_method_exception(
4254  rb_eNoMethodError, 0, recv, argc, argv, priv);
4255 
4256  rb_exc_raise(exc);
4257  }
4258  else {
4259  TOPN(i) = rb_str_intern(symbol);
4260  }
4261  }
4262  }
4263 
4264  struct rb_forwarding_call_data new_fcd = {
4265  .cd = {
4266  .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
4267  .cc = NULL,
4268  },
4269  .caller_ci = NULL,
4270  };
4271 
4272  if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
4273  calling->cd = &new_fcd.cd;
4274  }
4275  else {
4276  const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4277  VM_ASSERT((vm_ci_argc(caller_ci), 1));
4278  new_fcd.caller_ci = caller_ci;
4279  calling->cd = (struct rb_call_data *)&new_fcd;
4280  }
4281  calling->cc = &VM_CC_ON_STACK(klass,
4282  vm_call_general,
4283  { .method_missing_reason = missing_reason },
4284  rb_callable_method_entry_with_refinements(klass, mid, NULL));
4285 
4286  if (flags & VM_CALL_FCALL) {
4287  return vm_call_method(ec, reg_cfp, calling);
4288  }
4289 
4290  const struct rb_callcache *cc = calling->cc;
4291  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4292 
4293  if (vm_cc_cme(cc) != NULL) {
4294  switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4295  case METHOD_VISI_PUBLIC: /* likely */
4296  return vm_call_method_each_type(ec, reg_cfp, calling);
4297  case METHOD_VISI_PRIVATE:
4298  vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
4299  break;
4300  case METHOD_VISI_PROTECTED:
4301  vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4302  break;
4303  default:
4304  VM_UNREACHABLE(vm_call_method);
4305  }
4306  return vm_call_method_missing(ec, reg_cfp, calling);
4307  }
4308 
4309  return vm_call_method_nome(ec, reg_cfp, calling);
4310 }
4311 
4312 static VALUE
4313 vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
4314 {
4315  const struct rb_callinfo *ci = calling->cd->ci;
4316  int i;
4317  VALUE sym;
4318 
4319  i = calling->argc - 1;
4320 
4321  if (calling->argc == 0) {
4322  rb_raise(rb_eArgError, "no method name given");
4323  }
4324 
4325  sym = TOPN(i);
4326  /* E.g. when i == 2
4327  *
4328  * | | | | TOPN
4329  * +------+ | |
4330  * | arg1 | ---+ | | 0
4331  * +------+ | +------+
4332  * | arg0 | -+ +-> | arg1 | 1
4333  * +------+ | +------+
4334  * | sym | +---> | arg0 | 2
4335  * +------+ +------+
4336  * | recv | | recv | 3
4337  * --+------+--------+------+------
4338  */
4339  /* shift arguments */
4340  if (i > 0) {
4341  MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
4342  }
4343  calling->argc -= 1;
4344  DEC_SP(1);
4345 
4346  return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4347 }
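/* Illustration: for `recv.send(:m, a, b)` the symbol :m is shifted out here,
 * leaving the stack shaped like a direct `recv.m(a, b)` call, which
 * vm_call_symbol then dispatches with VM_CALL_OPT_SEND set. */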
4348 
4349 static VALUE
4350 vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4351 {
4352  RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
4353  const struct rb_callinfo *ci = calling->cd->ci;
4354  int flags = VM_CALL_FCALL;
4355  VALUE sym;
4356 
4357  VALUE argv_ary;
4358  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
4359  if (UNLIKELY(argv_ary = calling->heap_argv)) {
4360  sym = rb_ary_shift(argv_ary);
4361  flags |= VM_CALL_ARGS_SPLAT;
4362  if (calling->kw_splat) {
4363  VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
4364  ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
4365  calling->kw_splat = 0;
4366  }
4367  return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
4368  }
4369 
4370  if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
4371  return vm_call_opt_send0(ec, reg_cfp, calling, flags);
4372 }
4373 
4374 static VALUE
4375 vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4376 {
4377  RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
4378  return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
4379 }
4380 
4381 static VALUE
4382 vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4383 {
4384  RB_DEBUG_COUNTER_INC(ccf_opt_send);
4385 
4386  const struct rb_callinfo *ci = calling->cd->ci;
4387  int flags = vm_ci_flag(ci);
4388 
4389  if (UNLIKELY((flags & VM_CALL_FORWARDING) || (!(flags & VM_CALL_ARGS_SIMPLE) &&
4390  ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
4391  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
4392  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc)))))) {
4393  CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
4394  return vm_call_opt_send_complex(ec, reg_cfp, calling);
4395  }
4396 
4397  CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
4398  return vm_call_opt_send_simple(ec, reg_cfp, calling);
4399 }
4400 
4401 static VALUE
4402 vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
4403  const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
4404 {
4405  RB_DEBUG_COUNTER_INC(ccf_method_missing);
4406 
4407  VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4408  unsigned int argc, flag;
4409 
4410  flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
4411  argc = ++calling->argc;
4412 
4413  /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
4414  CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
4415  vm_check_canary(ec, reg_cfp->sp);
4416  if (argc > 1) {
4417  MEMMOVE(argv+1, argv, VALUE, argc-1);
4418  }
4419  argv[0] = ID2SYM(vm_ci_mid(orig_ci));
4420  INC_SP(1);
4421 
4422  ec->method_missing_reason = reason;
4423 
4424  struct rb_forwarding_call_data new_fcd = {
4425  .cd = {
4426  .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
4427  .cc = NULL,
4428  },
4429  .caller_ci = NULL,
4430  };
4431 
4432  if (!(flag & VM_CALL_FORWARDING)) {
4433  calling->cd = &new_fcd.cd;
4434  }
4435  else {
4436  const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
4437  VM_ASSERT((vm_ci_argc(caller_ci), 1));
4438  new_fcd.caller_ci = caller_ci;
4439  calling->cd = (struct rb_call_data *)&new_fcd;
4440  }
4441 
4442  calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
4443  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
4444  return vm_call_method(ec, reg_cfp, calling);
4445 }
4446 
4447 static VALUE
4448 vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4449 {
4450  return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
4451 }
4452 
4453 static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
4454 static VALUE
4455 vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
4456 {
4457  klass = RCLASS_SUPER(klass);
4458 
4459  const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
4460  if (cme == NULL) {
4461  return vm_call_method_nome(ec, cfp, calling);
4462  }
4463  if (cme->def->type == VM_METHOD_TYPE_REFINED &&
4464  cme->def->body.refined.orig_me) {
4465  cme = refined_method_callable_without_refinement(cme);
4466  }
4467 
4468  calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
4469 
4470  return vm_call_method_each_type(ec, cfp, calling);
4471 }
4472 
4473 static inline VALUE
4474 find_refinement(VALUE refinements, VALUE klass)
4475 {
4476  if (NIL_P(refinements)) {
4477  return Qnil;
4478  }
4479  return rb_hash_lookup(refinements, klass);
4480 }
4481 
4482 PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
4483 static rb_control_frame_t *
4484 current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
4485 {
4486  rb_control_frame_t *top_cfp = cfp;
4487 
4488  if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
4489  const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
4490 
4491  do {
4492  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
4493  if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
4494  /* TODO: orphan block */
4495  return top_cfp;
4496  }
4497  } while (cfp->iseq != local_iseq);
4498  }
4499  return cfp;
4500 }
4501 
4502 static const rb_callable_method_entry_t *
4503 refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
4504 {
4505  const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
4506  const rb_callable_method_entry_t *cme;
4507 
4508  if (orig_me->defined_class == 0) {
4509  cme = NULL;
4510  rb_notimplement();
4511  }
4512  else {
4513  cme = (const rb_callable_method_entry_t *)orig_me;
4514  }
4515 
4516  VM_ASSERT(callable_method_entry_p(cme));
4517 
4518  if (UNDEFINED_METHOD_ENTRY_P(cme)) {
4519  cme = NULL;
4520  }
4521 
4522  return cme;
4523 }
4524 
4525 static const rb_callable_method_entry_t *
4526 search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4527 {
4528  ID mid = vm_ci_mid(calling->cd->ci);
4529  const rb_cref_t *cref = vm_get_cref(cfp->ep);
4530  const struct rb_callcache * const cc = calling->cc;
4531  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4532 
4533  for (; cref; cref = CREF_NEXT(cref)) {
4534  const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
4535  if (NIL_P(refinement)) continue;
4536 
4537  const rb_callable_method_entry_t *const ref_me =
4538  rb_callable_method_entry(refinement, mid);
4539 
4540  if (ref_me) {
4541  if (vm_cc_call(cc) == vm_call_super_method) {
4542  const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
4543  const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
4544  if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
4545  continue;
4546  }
4547  }
4548 
4549  if (cme->def->type != VM_METHOD_TYPE_REFINED ||
4550  cme->def != ref_me->def) {
4551  cme = ref_me;
4552  }
4553  if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
4554  return cme;
4555  }
4556  }
4557  else {
4558  return NULL;
4559  }
4560  }
4561 
4562  if (vm_cc_cme(cc)->def->body.refined.orig_me) {
4563  return refined_method_callable_without_refinement(vm_cc_cme(cc));
4564  }
4565  else {
4566  VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
4567  const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
4568  return cme;
4569  }
4570 }
4571 
4572 static VALUE
4573 vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4574 {
4575  const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
4576 
4577  if (ref_cme) {
4578  if (calling->cd->cc) {
4579  const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
4580  RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
4581  return vm_call_method(ec, cfp, calling);
4582  }
4583  else {
4584  struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
4585  calling->cc = ref_cc;
4586  return vm_call_method(ec, cfp, calling);
4587  }
4588  }
4589  else {
4590  return vm_call_method_nome(ec, cfp, calling);
4591  }
4592 }
4593 
4594 static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
4595 
4596 NOINLINE(static VALUE
4597  vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4598  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
4599 
4600 static VALUE
4601 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4602  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
4603 {
4604  int argc = calling->argc;
4605 
4606  /* remove self */
4607  if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
4608  DEC_SP(1);
4609 
4610  return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
4611 }
4612 
4613 static VALUE
4614 vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4615 {
4616  RB_DEBUG_COUNTER_INC(ccf_opt_call);
4617 
4618  const struct rb_callinfo *ci = calling->cd->ci;
4619  VALUE procval = calling->recv;
4620  return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
4621 }
4622 
4623 static VALUE
4624 vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4625 {
4626  RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
4627 
4628  VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
4629  const struct rb_callinfo *ci = calling->cd->ci;
4630 
4631  if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
4632  return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
4633  }
4634  else {
4635  calling->recv = rb_vm_bh_to_procval(ec, block_handler);
4636  calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
4637  return vm_call_general(ec, reg_cfp, calling);
4638  }
4639 }
4640 
4641 static VALUE
4642 vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
4643 {
4644  VALUE recv = calling->recv;
4645 
4646  VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4647  VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4648  VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
4649 
4650  const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4651  return internal_RSTRUCT_GET(recv, off);
4652 }
4653 
4654 static VALUE
4655 vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4656 {
4657  RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
4658 
4659  VALUE ret = vm_call_opt_struct_aref0(ec, calling);
4660  reg_cfp->sp -= 1;
4661  return ret;
4662 }
4663 
4664 static VALUE
4665 vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
4666 {
4667  VALUE recv = calling->recv;
4668 
4669  VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
4670  VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
4671  VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
4672 
4673  rb_check_frozen(recv);
4674 
4675  const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
4676  internal_RSTRUCT_SET(recv, off, val);
4677 
4678  return val;
4679 }
4680 
4681 static VALUE
4682 vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4683 {
4684  RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
4685 
4686  VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
4687  reg_cfp->sp -= 2;
4688  return ret;
4689 }
4690 
4691 NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4692  const struct rb_callinfo *ci, const struct rb_callcache *cc));
4693 
4694 #define VM_CALL_METHOD_ATTR(var, func, nohook) \
4695  if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
4696  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
4697  vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
4698  var = func; \
4699  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
4700  vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
4701  } \
4702  else { \
4703  nohook; \
4704  var = func; \
4705  }
4706 
4707 static VALUE
4708 vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
4709  const struct rb_callinfo *ci, const struct rb_callcache *cc)
4710 {
4711  switch (vm_cc_cme(cc)->def->body.optimized.type) {
4712  case OPTIMIZED_METHOD_TYPE_SEND:
4713  CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
4714  return vm_call_opt_send(ec, cfp, calling);
4715  case OPTIMIZED_METHOD_TYPE_CALL:
4716  CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
4717  return vm_call_opt_call(ec, cfp, calling);
4718  case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
4719  CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
4720  return vm_call_opt_block_call(ec, cfp, calling);
4721  case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
4722  CALLER_SETUP_ARG(cfp, calling, ci, 0);
4723  rb_check_arity(calling->argc, 0, 0);
4724 
4725  VALUE v;
4726  VM_CALL_METHOD_ATTR(v,
4727  vm_call_opt_struct_aref(ec, cfp, calling),
4728  set_vm_cc_ivar(cc); \
4729  CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4730  return v;
4731  }
4732  case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
4733  CALLER_SETUP_ARG(cfp, calling, ci, 1);
4734  rb_check_arity(calling->argc, 1, 1);
4735 
4736  VALUE v;
4737  VM_CALL_METHOD_ATTR(v,
4738  vm_call_opt_struct_aset(ec, cfp, calling),
4739  set_vm_cc_ivar(cc); \
4740  CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
4741  return v;
4742  }
4743  default:
4744  rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
4745  }
4746 }
4747 
4748 static VALUE
4749 vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4750 {
4751  const struct rb_callinfo *ci = calling->cd->ci;
4752  const struct rb_callcache *cc = calling->cc;
4753  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4754  VALUE v;
4755 
4756  VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
4757 
4758  switch (cme->def->type) {
4759  case VM_METHOD_TYPE_ISEQ:
4760  if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
4761  CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
4762  return vm_call_iseq_fwd_setup(ec, cfp, calling);
4763  }
4764  else {
4765  CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
4766  return vm_call_iseq_setup(ec, cfp, calling);
4767  }
4768 
4769  case VM_METHOD_TYPE_NOTIMPLEMENTED:
4770  case VM_METHOD_TYPE_CFUNC:
4771  CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
4772  return vm_call_cfunc(ec, cfp, calling);
4773 
4774  case VM_METHOD_TYPE_ATTRSET:
4775  CALLER_SETUP_ARG(cfp, calling, ci, 1);
4776 
4777  rb_check_arity(calling->argc, 1, 1);
4778 
4779  const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
4780 
4781  if (vm_cc_markable(cc)) {
4782  vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4783  VM_CALL_METHOD_ATTR(v,
4784  vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4785  CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4786  }
4787  else {
4788  cc = &((struct rb_callcache) {
4789  .flags = T_IMEMO |
4790  (imemo_callcache << FL_USHIFT) |
4791  VM_CALLCACHE_UNMARKABLE |
4792  VM_CALLCACHE_ON_STACK,
4793  .klass = cc->klass,
4794  .cme_ = cc->cme_,
4795  .call_ = cc->call_,
4796  .aux_ = {
4797  .attr = {
4798  .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
4799  }
4800  },
4801  });
4802 
4803  VM_CALL_METHOD_ATTR(v,
4804  vm_call_attrset_direct(ec, cfp, cc, calling->recv),
4805  CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
4806  }
4807  return v;
4808 
4809  case VM_METHOD_TYPE_IVAR:
4810  CALLER_SETUP_ARG(cfp, calling, ci, 0);
4811  rb_check_arity(calling->argc, 0, 0);
4812  vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
4813  const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
4814  VM_CALL_METHOD_ATTR(v,
4815  vm_call_ivar(ec, cfp, calling),
4816  CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
4817  return v;
4818 
4819  case VM_METHOD_TYPE_MISSING:
4820  vm_cc_method_missing_reason_set(cc, 0);
4821  CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4822  return vm_call_method_missing(ec, cfp, calling);
4823 
4824  case VM_METHOD_TYPE_BMETHOD:
4825  CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
4826  return vm_call_bmethod(ec, cfp, calling);
4827 
4828  case VM_METHOD_TYPE_ALIAS:
4829  CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
4830  return vm_call_alias(ec, cfp, calling);
4831 
4832  case VM_METHOD_TYPE_OPTIMIZED:
4833  return vm_call_optimized(ec, cfp, calling, ci, cc);
4834 
4835  case VM_METHOD_TYPE_UNDEF:
4836  break;
4837 
4838  case VM_METHOD_TYPE_ZSUPER:
4839  return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
4840 
4841  case VM_METHOD_TYPE_REFINED:
4842  // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
4843  // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
4844  return vm_call_refined(ec, cfp, calling);
4845  }
4846 
4847  rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
4848 }
4849 
4850 NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
4851 
4852 static VALUE
4853 vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4854 {
4855  /* method missing */
4856  const struct rb_callinfo *ci = calling->cd->ci;
4857  const int stat = ci_missing_reason(ci);
4858 
4859  if (vm_ci_mid(ci) == idMethodMissing) {
4860  if (UNLIKELY(calling->heap_argv)) {
4861  vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
4862  }
4863  else {
4864  rb_control_frame_t *reg_cfp = cfp;
4865  VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
4866  vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
4867  }
4868  }
4869  else {
4870  return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
4871  }
4872 }
4873 
4874 /* Protected method calls and super invocations need to check that the receiver
4875  * (self for super) inherits the module on which the method is defined.
4876  * In the case of refinements, the check should consider the original class,
4877  * not the refinement.
4878  */
4879 static VALUE
4880 vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
4881 {
4882  VALUE defined_class = me->defined_class;
4883  VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
4884  return NIL_P(refined_class) ? defined_class : refined_class;
4885 }
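/* Illustration (Ruby):
 *
 *   class A
 *     def touch(other)
 *       other.secret          # allowed: the caller's self is an A
 *     end
 *     protected def secret
 *       :ok
 *     end
 *   end
 *   A.new.touch(A.new)  #=> :ok
 *   A.new.secret        # NoMethodError (protected method called)
 *
 * The check in vm_call_method below compares the calling frame's self against
 * this defined class (the original class when refinements are involved). */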
4886 
4887 static inline VALUE
4888 vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
4889 {
4890  const struct rb_callinfo *ci = calling->cd->ci;
4891  const struct rb_callcache *cc = calling->cc;
4892 
4893  VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
4894 
4895  if (vm_cc_cme(cc) != NULL) {
4896  switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
4897  case METHOD_VISI_PUBLIC: /* likely */
4898  return vm_call_method_each_type(ec, cfp, calling);
4899 
4900  case METHOD_VISI_PRIVATE:
4901  if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
4902  enum method_missing_reason stat = MISSING_PRIVATE;
4903  if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
4904 
4905  vm_cc_method_missing_reason_set(cc, stat);
4906  CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
4907  return vm_call_method_missing(ec, cfp, calling);
4908  }
4909  return vm_call_method_each_type(ec, cfp, calling);
4910 
4911  case METHOD_VISI_PROTECTED:
4912  if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
4913  VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
4914  if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
4915  vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
4916  return vm_call_method_missing(ec, cfp, calling);
4917  }
4918  else {
4919  /* caching method info in a dummy cc */
4920  VM_ASSERT(vm_cc_cme(cc) != NULL);
4921  struct rb_callcache cc_on_stack = *cc;
4922  FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
4923  calling->cc = &cc_on_stack;
4924  return vm_call_method_each_type(ec, cfp, calling);
4925  }
4926  }
4927  return vm_call_method_each_type(ec, cfp, calling);
4928 
4929  default:
4930  rb_bug("unreachable");
4931  }
4932  }
4933  else {
4934  return vm_call_method_nome(ec, cfp, calling);
4935  }
4936 }
4937 
4938 static VALUE
4939 vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4940 {
4941  RB_DEBUG_COUNTER_INC(ccf_general);
4942  return vm_call_method(ec, reg_cfp, calling);
4943 }
4944 
4945 void
4946 rb_vm_cc_general(const struct rb_callcache *cc)
4947 {
4948  VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
4949  VM_ASSERT(cc != vm_cc_empty());
4950 
4951  *(vm_call_handler *)&cc->call_ = vm_call_general;
4952 }
4953 
4954 static VALUE
4955 vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
4956 {
4957  RB_DEBUG_COUNTER_INC(ccf_super_method);
4958 
4959  // This check is introduced to keep this function distinct from `vm_call_general`, because some
4960  // compilers (VC, as we found) can merge identical functions so that their addresses become the same.
4961  // The address of `vm_call_super_method` is used in `search_refined_method`, so it must stay distinct.
4962  if (ec == NULL) rb_bug("unreachable");
4963 
4964  /* this check is required to distinguish this function from other functions. */
4965  VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
4966  return vm_call_method(ec, reg_cfp, calling);
4967 }
4968 
4969 /* super */
4970 
4971 static inline VALUE
4972 vm_search_normal_superclass(VALUE klass)
4973 {
4974  if (BUILTIN_TYPE(klass) == T_ICLASS &&
4975  RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
4976  FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
4977  klass = RBASIC(klass)->klass;
4978  }
4979  klass = RCLASS_ORIGIN(klass);
4980  return RCLASS_SUPER(klass);
4981 }
4982 
4983 NORETURN(static void vm_super_outside(void));
4984 
4985 static void
4986 vm_super_outside(void)
4987 {
4988  rb_raise(rb_eNoMethodError, "super called outside of method");
4989 }
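/* Illustration: reached when the current frame carries no method entry, e.g.
 * `super` evaluated at the top level raises NoMethodError with this message. */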
4990 
4991 static const struct rb_callcache *
4992 empty_cc_for_super(void)
4993 {
4994  return &vm_empty_cc_for_super;
4995 }
4996 
4997 static const struct rb_callcache *
4998 vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
4999 {
5000  VALUE current_defined_class;
5001  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5002 
5003  if (!me) {
5004  vm_super_outside();
5005  }
5006 
5007  current_defined_class = vm_defined_class_for_protected_call(me);
5008 
5009  if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
5010  reg_cfp->iseq != method_entry_iseqptr(me) &&
5011  !rb_obj_is_kind_of(recv, current_defined_class)) {
5012  VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
5013  RCLASS_INCLUDER(current_defined_class) : current_defined_class;
5014 
5015  if (m) { /* not bound UnboundMethod */
5017  "self has wrong type to call super in this context: "
5018  "%"PRIsVALUE" (expected %"PRIsVALUE")",
5019  rb_obj_class(recv), m);
5020  }
5021  }
5022 
5023  if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
5025  "implicit argument passing of super from method defined"
5026  " by define_method() is not supported."
5027  " Specify all arguments explicitly.");
5028  }
5029 
5030  ID mid = me->def->original_id;
5031 
5032  if (!vm_ci_markable(cd->ci)) {
5033  VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
5034  }
5035  else {
5036  // update iseq. really? (TODO)
5037  cd->ci = vm_ci_new_runtime(mid,
5038  vm_ci_flag(cd->ci),
5039  vm_ci_argc(cd->ci),
5040  vm_ci_kwarg(cd->ci));
5041 
5042  RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
5043  }
5044 
5045  const struct rb_callcache *cc;
5046 
5047  VALUE klass = vm_search_normal_superclass(me->defined_class);
5048 
5049  if (!klass) {
5050  /* bound instance method of module */
5051  cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
5052  RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5053  }
5054  else {
5055  cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
5056  const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
5057 
5058  // define_method can cache for different method id
5059  if (cached_cme == NULL) {
5060  // empty_cc_for_super is not a markable object
5061  cd->cc = empty_cc_for_super();
5062  }
5063  else if (cached_cme->called_id != mid) {
5064  const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
5065  if (cme) {
5066  cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
5067  RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
5068  }
5069  else {
5070  cd->cc = cc = empty_cc_for_super();
5071  }
5072  }
5073  else {
5074  switch (cached_cme->def->type) {
5075  // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
5076  case VM_METHOD_TYPE_REFINED:
5077  // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
5078  case VM_METHOD_TYPE_ATTRSET:
5079  case VM_METHOD_TYPE_IVAR:
5080  vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
5081  break;
5082  default:
5083  break; // use fastpath
5084  }
5085  }
5086  }
5087 
5088  VM_ASSERT((vm_cc_cme(cc), true));
5089 
5090  return cc;
5091 }
5092 
5093 /* yield */
5094 
5095 static inline int
5096 block_proc_is_lambda(const VALUE procval)
5097 {
5098  rb_proc_t *proc;
5099 
5100  if (procval) {
5101  GetProcPtr(procval, proc);
5102  return proc->is_lambda;
5103  }
5104  else {
5105  return 0;
5106  }
5107 }
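/* Illustration: `lambda { }.lambda? #=> true`, `proc { }.lambda? #=> false`;
 * this flag selects strict (method-style) versus lenient (block-style)
 * argument handling in the setup code below. */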
5108 
5109 static VALUE
5110 vm_yield_with_cfunc(rb_execution_context_t *ec,
5111  const struct rb_captured_block *captured,
5112  VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
5113  const rb_callable_method_entry_t *me)
5114 {
5115  int is_lambda = FALSE; /* TODO */
5116  VALUE val, arg, blockarg;
5117  int frame_flag;
5118  const struct vm_ifunc *ifunc = captured->code.ifunc;
5119 
5120  if (is_lambda) {
5121  arg = rb_ary_new4(argc, argv);
5122  }
5123  else if (argc == 0) {
5124  arg = Qnil;
5125  }
5126  else {
5127  arg = argv[0];
5128  }
5129 
5130  blockarg = rb_vm_bh_to_procval(ec, block_handler);
5131 
5132  frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
5133  if (kw_splat) {
5134  frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
5135  }
5136 
5137  vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
5138  frame_flag,
5139  self,
5140  VM_GUARDED_PREV_EP(captured->ep),
5141  (VALUE)me,
5142  0, ec->cfp->sp, 0, 0);
5143  val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
5144  rb_vm_pop_frame(ec);
5145 
5146  return val;
5147 }
5148 
5149 VALUE
5150 rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
5151 {
5152  return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
5153 }
5154 
5155 static VALUE
5156 vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
5157 {
5158  return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
5159 }
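/* Illustration: a Symbol passed as a block, e.g. `[1, 2].map(&:to_s)`, lands
 * here; the symbol is sent to each yielded value, so it behaves like
 * `[1, 2].map { |e| e.to_s }`. */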
5160 
5161 static inline int
5162 vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
5163 {
5164  int i;
5165  long len = RARRAY_LEN(ary);
5166 
5167  CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5168 
5169  for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
5170  argv[i] = RARRAY_AREF(ary, i);
5171  }
5172 
5173  return i;
5174 }
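/* Illustration: a single Array argument gets auto-splatted across a block's
 * leading parameters, e.g. `[[1, 2]].each { |a, b| }` binds a = 1, b = 2. */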
5175 
5176 static inline VALUE
5177 vm_callee_setup_block_arg_arg0_check(VALUE *argv)
5178 {
5179  VALUE ary, arg0 = argv[0];
5180  ary = rb_check_array_type(arg0);
5181 #if 0
5182  argv[0] = arg0;
5183 #else
5184  VM_ASSERT(argv[0] == arg0);
5185 #endif
5186  return ary;
5187 }
5188 
5189 static int
5190 vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
5191 {
5192  if (rb_simple_iseq_p(iseq)) {
5193  rb_control_frame_t *cfp = ec->cfp;
5194  VALUE arg0;
5195 
5196  CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
5197 
5198  if (arg_setup_type == arg_setup_block &&
5199  calling->argc == 1 &&
5200  ISEQ_BODY(iseq)->param.flags.has_lead &&
5201  !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
5202  !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
5203  calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
5204  }
5205 
5206  if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
5207  if (arg_setup_type == arg_setup_block) {
5208  if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
5209  int i;
5210  CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
5211  for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
5212  calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
5213  }
5214  else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
5215  calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
5216  }
5217  }
5218  else {
5219  argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
5220  }
5221  }
5222 
5223  return 0;
5224  }
5225  else {
5226  return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
5227  }
5228 }
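/* Illustration of the block-vs-lambda arity rules implemented above:
 *
 *   proc   { |a, b| [a, b] }.call(1)        #=> [1, nil]    (padded with nil)
 *   proc   { |a, b| [a, b] }.call(1, 2, 3)  #=> [1, 2]      (extra args dropped)
 *   lambda { |a, b| [a, b] }.call(1)        # ArgumentError (arg_setup_method path)
 */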
5229 
5230 static int
5231 vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
5232 {
5233  struct rb_calling_info calling_entry, *calling;
5234 
5235  calling = &calling_entry;
5236  calling->argc = argc;
5237  calling->block_handler = block_handler;
5238  calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
5239  calling->recv = Qundef;
5240  calling->heap_argv = 0;
5241  struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
5242 
5243  return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
5244 }
5245 
5246 /* ruby iseq -> ruby block */
5247 
5248 static VALUE
5249 vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5250  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5251  bool is_lambda, VALUE block_handler)
5252 {
5253  const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
5254  const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
5255  const int arg_size = ISEQ_BODY(iseq)->param.size;
5256  VALUE * const rsp = GET_SP() - calling->argc;
5257  VALUE * const argv = rsp;
5258  int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
5259 
5260  SET_SP(rsp);
5261 
5262  vm_push_frame(ec, iseq,
5263  VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
5264  captured->self,
5265  VM_GUARDED_PREV_EP(captured->ep), 0,
5266  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
5267  rsp + arg_size,
5268  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
5269 
5270  return Qundef;
5271 }
5272 
5273 static VALUE
5274 vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5275  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5276  MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5277 {
5278  VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
5279  int flags = vm_ci_flag(ci);
5280 
5281  if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
5282  ((calling->argc == 0) ||
5283  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
5284  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
5285  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
5286  CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
5287  flags = 0;
5288  if (UNLIKELY(calling->heap_argv)) {
5289 #if VM_ARGC_STACK_MAX < 0
5290  if (RARRAY_LEN(calling->heap_argv) < 1) {
5291  rb_raise(rb_eArgError, "no receiver given");
5292  }
5293 #endif
5294  calling->recv = rb_ary_shift(calling->heap_argv);
5295  // Modify stack to avoid cfp consistency error
5296  reg_cfp->sp++;
5297  reg_cfp->sp[-1] = reg_cfp->sp[-2];
5298  reg_cfp->sp[-2] = calling->recv;
5299  flags |= VM_CALL_ARGS_SPLAT;
5300  }
5301  else {
5302  if (calling->argc < 1) {
5303  rb_raise(rb_eArgError, "no receiver given");
5304  }
5305  calling->recv = TOPN(--calling->argc);
5306  }
5307  if (calling->kw_splat) {
5308  flags |= VM_CALL_KW_SPLAT;
5309  }
5310  }
5311  else {
5312  if (calling->argc < 1) {
5313  rb_raise(rb_eArgError, "no receiver given");
5314  }
5315  calling->recv = TOPN(--calling->argc);
5316  }
5317 
5318  return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
5319 }
5320 
5321 static VALUE
5322 vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5323  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5324  MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
5325 {
5326  VALUE val;
5327  int argc;
5328  const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
5329  CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
5330  argc = calling->argc;
5331  val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
5332  POPN(argc); /* TODO: should this be done before the C call/yield? */
5333  return val;
5334 }
5335 
5336 static VALUE
5337 vm_proc_to_block_handler(VALUE procval)
5338 {
5339  const struct rb_block *block = vm_proc_block(procval);
5340 
5341  switch (vm_block_type(block)) {
5342  case block_type_iseq:
5343  return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
5344  case block_type_ifunc:
5345  return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
5346  case block_type_symbol:
5347  return VM_BH_FROM_SYMBOL(block->as.symbol);
5348  case block_type_proc:
5349  return VM_BH_FROM_PROC(block->as.proc);
5350  }
5351  VM_UNREACHABLE(vm_yield_with_proc);
5352  return Qundef;
5353 }
5354 
5355 static VALUE
5356 vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5357  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5358  bool is_lambda, VALUE block_handler)
5359 {
5360  while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
5361  VALUE proc = VM_BH_TO_PROC(block_handler);
5362  is_lambda = block_proc_is_lambda(proc);
5363  block_handler = vm_proc_to_block_handler(proc);
5364  }
5365 
5366  return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5367 }
5368 
5369 static inline VALUE
5370 vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5371  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5372  bool is_lambda, VALUE block_handler)
5373 {
5374  VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
5375  struct rb_calling_info *calling, const struct rb_callinfo *ci,
5376  bool is_lambda, VALUE block_handler);
5377 
5378  switch (vm_block_handler_type(block_handler)) {
5379  case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
5380  case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
5381  case block_handler_type_proc: func = vm_invoke_proc_block; break;
5382  case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
5383  default: rb_bug("vm_invoke_block: unreachable");
5384  }
5385 
5386  return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
5387 }
5388 
5389 static VALUE
5390 vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
5391 {
5392  const rb_execution_context_t *ec = GET_EC();
5393  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5394  struct rb_captured_block *captured;
5395 
5396  if (cfp == 0) {
5397  rb_bug("vm_make_proc_with_iseq: unreachable");
5398  }
5399 
5400  captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
5401  captured->code.iseq = blockiseq;
5402 
5403  return rb_vm_make_proc(ec, captured, rb_cProc);
5404 }
5405 
5406 static VALUE
5407 vm_once_exec(VALUE iseq)
5408 {
5409  VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
5410  return rb_proc_call_with_block(proc, 0, 0, Qnil);
5411 }
5412 
5413 static VALUE
5414 vm_once_clear(VALUE data)
5415 {
5416  union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
5417  is->once.running_thread = NULL;
5418  return Qnil;
5419 }
5420 
5421 /* defined insn */
5422 
5423 static bool
5424 check_respond_to_missing(VALUE obj, VALUE v)
5425 {
5426  VALUE args[2];
5427  VALUE r;
5428 
5429  args[0] = obj; args[1] = Qfalse;
5430  r = rb_check_funcall(v, idRespond_to_missing, 2, args);
5431  if (!UNDEF_P(r) && RTEST(r)) {
5432  return true;
5433  }
5434  else {
5435  return false;
5436  }
5437 }
5438 
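/* Backs the `defined?` expression. Rough mapping of op_type to source forms
 * (illustrative):
 *   defined?(@ivar)   -> DEFINED_IVAR     defined?($gvar)  -> DEFINED_GVAR
 *   defined?(@@cvar)  -> DEFINED_CVAR     defined?(Const)  -> DEFINED_CONST
 *   defined?(foo)     -> DEFINED_FUNC     defined?(x.foo)  -> DEFINED_METHOD
 *   defined?(yield)   -> DEFINED_YIELD    defined?(super)  -> DEFINED_ZSUPER
 *   defined?($1)      -> DEFINED_REF      Foo::Bar         -> DEFINED_CONST_FROM
 */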
5439 static bool
5440 vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5441 {
5442  VALUE klass;
5443  enum defined_type type = (enum defined_type)op_type;
5444 
5445  switch (type) {
5446  case DEFINED_IVAR:
5447  return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
5448  break;
5449  case DEFINED_GVAR:
5450  return rb_gvar_defined(SYM2ID(obj));
5451  break;
5452  case DEFINED_CVAR: {
5453  const rb_cref_t *cref = vm_get_cref(GET_EP());
5454  klass = vm_get_cvar_base(cref, GET_CFP(), 0);
5455  return rb_cvar_defined(klass, SYM2ID(obj));
5456  break;
5457  }
5458  case DEFINED_CONST:
5459  case DEFINED_CONST_FROM: {
5460  bool allow_nil = type == DEFINED_CONST;
5461  klass = v;
5462  return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
5463  break;
5464  }
5465  case DEFINED_FUNC:
5466  klass = CLASS_OF(v);
5467  return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
5468  break;
5469  case DEFINED_METHOD:{
5470  VALUE klass = CLASS_OF(v);
5471  const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
5472 
5473  if (me) {
5474  switch (METHOD_ENTRY_VISI(me)) {
5475  case METHOD_VISI_PRIVATE:
5476  break;
5477  case METHOD_VISI_PROTECTED:
5478  if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
5479  break;
5480  }
5481  case METHOD_VISI_PUBLIC:
5482  return true;
5483  break;
5484  default:
5485  rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
5486  }
5487  }
5488  else {
5489  return check_respond_to_missing(obj, v);
5490  }
5491  break;
5492  }
5493  case DEFINED_YIELD:
5494  if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
5495  return true;
5496  }
5497  break;
5498  case DEFINED_ZSUPER:
5499  {
5500  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
5501 
5502  if (me) {
5503  VALUE klass = vm_search_normal_superclass(me->defined_class);
5504  if (!klass) return false;
5505 
5506  ID id = me->def->original_id;
5507 
5508  return rb_method_boundp(klass, id, 0);
5509  }
5510  }
5511  break;
5512  case DEFINED_REF:
5513  return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
5514  default:
5515  rb_bug("unimplemented defined? type (VM)");
5516  break;
5517  }
5518 
5519  return false;
5520 }
5521 
5522 bool
5523 rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
5524 {
5525  return vm_defined(ec, reg_cfp, op_type, obj, v);
5526 }
5527 
5528 static const VALUE *
5529 vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
5530 {
5531  rb_num_t i;
5532  const VALUE *ep = reg_ep;
5533  for (i = 0; i < lv; i++) {
5534  ep = GET_PREV_EP(ep);
5535  }
5536  return ep;
5537 }
5538 
5539 static VALUE
5540 vm_get_special_object(const VALUE *const reg_ep,
5541  enum vm_special_object_type type)
5542 {
5543  switch (type) {
5544  case VM_SPECIAL_OBJECT_VMCORE:
5545  return rb_mRubyVMFrozenCore;
5546  case VM_SPECIAL_OBJECT_CBASE:
5547  return vm_get_cbase(reg_ep);
5548  case VM_SPECIAL_OBJECT_CONST_BASE:
5549  return vm_get_const_base(reg_ep);
5550  default:
5551  rb_bug("putspecialobject insn: unknown value_type %d", type);
5552  }
5553 }
5554 
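/* concatarray: implements `[*a, *b]`-style literals (illustrative). When the
 * #to_a conversion returns the receiver itself, it is dup'ed first so the
 * literal never mutates the source; values without #to_a are wrapped,
 * e.g. [*1, *[2]] #=> [1, 2]. */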
5555 static VALUE
5556 vm_concat_array(VALUE ary1, VALUE ary2st)
5557 {
5558  const VALUE ary2 = ary2st;
5559  VALUE tmp1 = rb_check_to_array(ary1);
5560  VALUE tmp2 = rb_check_to_array(ary2);
5561 
5562  if (NIL_P(tmp1)) {
5563  tmp1 = rb_ary_new3(1, ary1);
5564  }
5565  if (tmp1 == ary1) {
5566  tmp1 = rb_ary_dup(ary1);
5567  }
5568 
5569  if (NIL_P(tmp2)) {
5570  return rb_ary_push(tmp1, ary2);
5571  } else {
5572  return rb_ary_concat(tmp1, tmp2);
5573  }
5574 }
5575 
5576 static VALUE
5577 vm_concat_to_array(VALUE ary1, VALUE ary2st)
5578 {
5579  /* ary1 must be a newly created array */
5580  const VALUE ary2 = ary2st;
5581  VALUE tmp2 = rb_check_to_array(ary2);
5582 
5583  if (NIL_P(tmp2)) {
5584  return rb_ary_push(ary1, ary2);
5585  } else {
5586  return rb_ary_concat(ary1, tmp2);
5587  }
5588 }
5589 
5590 // The YJIT implementation uses this C function
5591 // and needs to call a non-static function
5592 VALUE
5593 rb_vm_concat_array(VALUE ary1, VALUE ary2st)
5594 {
5595  return vm_concat_array(ary1, ary2st);
5596 }
5597 
5598 VALUE
5599 rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
5600 {
5601  return vm_concat_to_array(ary1, ary2st);
5602 }
5603 
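/* splatarray: implements argument splats such as f(*ary) (illustrative).
 * `flag` requests a defensive dup for cases where the result may be mutated;
 * values without #to_a are wrapped, so f(*1) behaves like f(1). */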
5604 static VALUE
5605 vm_splat_array(VALUE flag, VALUE ary)
5606 {
5607  VALUE tmp = rb_check_to_array(ary);
5608  if (NIL_P(tmp)) {
5609  return rb_ary_new3(1, ary);
5610  }
5611  else if (RTEST(flag)) {
5612  return rb_ary_dup(tmp);
5613  }
5614  else {
5615  return tmp;
5616  }
5617 }
5618 
5619 // The YJIT implementation uses this C function
5620 // and needs to call a non-static function
5621 VALUE
5622 rb_vm_splat_array(VALUE flag, VALUE ary)
5623 {
5624  return vm_splat_array(flag, ary);
5625 }
5626 
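/* checkmatch: drives `case/when` and rescue-clause matching by calling
 * pattern === target (illustrative). VM_CHECKMATCH_ARRAY marks a splatted
 * pattern, e.g. `when *candidates`, whose elements are tried in order. */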
5627 static VALUE
5628 vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5629 {
5630  enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
5631 
5632  if (flag & VM_CHECKMATCH_ARRAY) {
5633  long i;
5634  const long n = RARRAY_LEN(pattern);
5635 
5636  for (i = 0; i < n; i++) {
5637  VALUE v = RARRAY_AREF(pattern, i);
5638  VALUE c = check_match(ec, v, target, type);
5639 
5640  if (RTEST(c)) {
5641  return c;
5642  }
5643  }
5644  return Qfalse;
5645  }
5646  else {
5647  return check_match(ec, pattern, target, type);
5648  }
5649 }
5650 
5651 VALUE
5652 rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
5653 {
5654  return vm_check_match(ec, target, pattern, flag);
5655 }
5656 
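/* checkkeyword: decides whether a keyword default needs evaluating
 * (illustrative reading). Bit `idx` of kw_bits is set when that keyword was
 * passed explicitly, so Qtrue means "run the default expression"; a Hash
 * replaces the Fixnum bitmap beyond KW_SPECIFIED_BITS_MAX keywords. */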
5657 static VALUE
5658 vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
5659 {
5660  const VALUE kw_bits = *(ep - bits);
5661 
5662  if (FIXNUM_P(kw_bits)) {
5663  unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
5664  if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
5665  return Qfalse;
5666  }
5667  else {
5668  VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
5669  if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
5670  }
5671  return Qtrue;
5672 }
5673 
5674 static void
5675 vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
5676 {
5677  if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
5678  RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
5679  RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
5680  RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
5681 
5682  switch (flag) {
5683  case RUBY_EVENT_CALL:
5684  RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
5685  return;
5686  case RUBY_EVENT_C_CALL:
5687  RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
5688  return;
5689  case RUBY_EVENT_RETURN:
5690  RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
5691  return;
5692  case RUBY_EVENT_C_RETURN:
5693  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
5694  return;
5695  }
5696  }
5697 }
5698 
5699 static VALUE
5700 vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
5701 {
5702  if (!rb_const_defined_at(cbase, id)) {
5703  return 0;
5704  }
5705  else if (VM_DEFINECLASS_SCOPED_P(flags)) {
5706  return rb_public_const_get_at(cbase, id);
5707  }
5708  else {
5709  return rb_const_get_at(cbase, id);
5710  }
5711 }
5712 
5713 static VALUE
5714 vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
5715 {
5716  if (!RB_TYPE_P(klass, T_CLASS)) {
5717  return 0;
5718  }
5719  else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
5720  VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
5721 
5722  if (tmp != super) {
5723  rb_raise(rb_eTypeError,
5724  "superclass mismatch for class %"PRIsVALUE"",
5725  rb_id2str(id));
5726  }
5727  else {
5728  return klass;
5729  }
5730  }
5731  else {
5732  return klass;
5733  }
5734 }
5735 
5736 static VALUE
5737 vm_check_if_module(ID id, VALUE mod)
5738 {
5739  if (!RB_TYPE_P(mod, T_MODULE)) {
5740  return 0;
5741  }
5742  else {
5743  return mod;
5744  }
5745 }
5746 
5747 static VALUE
5748 declare_under(ID id, VALUE cbase, VALUE c)
5749 {
5750  rb_set_class_path_string(c, cbase, rb_id2str(id));
5751  rb_const_set(cbase, id, c);
5752  return c;
5753 }
5754 
5755 static VALUE
5756 vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5757 {
5758  /* new class declaration */
5759  VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
5760  VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
5761  rb_define_alloc_func(c, rb_get_alloc_func(c));
5762  rb_class_inherited(s, c);
5763  return c;
5764 }
5765 
5766 static VALUE
5767 vm_declare_module(ID id, VALUE cbase)
5768 {
5769  /* new module declaration */
5770  return declare_under(id, cbase, rb_module_new());
5771 }
5772 
5773 NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
5774 static void
5775 unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
5776 {
5777  VALUE name = rb_id2str(id);
5778  VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
5779  name, type);
5780  VALUE location = rb_const_source_location_at(cbase, id);
5781  if (!NIL_P(location)) {
5782  rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
5783  " previous definition of %"PRIsVALUE" was here",
5784  rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
5785  }
5786  rb_raise(rb_eTypeError, "%"PRIsVALUE, message);
5787 }
5788 
5789 static VALUE
5790 vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
5791 {
5792  VALUE klass;
5793 
5794  if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
5795  rb_raise(rb_eTypeError,
5796  "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
5797  rb_obj_class(super));
5798  }
5799 
5800  vm_check_if_namespace(cbase);
5801 
5802  /* find klass */
5803  rb_autoload_load(cbase, id);
5804  if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
5805  if (!vm_check_if_class(id, flags, super, klass))
5806  unmatched_redefinition("class", cbase, id, klass);
5807  return klass;
5808  }
5809  else {
5810  return vm_declare_class(id, flags, cbase, super);
5811  }
5812 }
5813 
5814 static VALUE
5815 vm_define_module(ID id, rb_num_t flags, VALUE cbase)
5816 {
5817  VALUE mod;
5818 
5819  vm_check_if_namespace(cbase);
5820  if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
5821  if (!vm_check_if_module(id, mod))
5822  unmatched_redefinition("module", cbase, id, mod);
5823  return mod;
5824  }
5825  else {
5826  return vm_declare_module(id, cbase);
5827  }
5828 }
5829 
5830 static VALUE
5831 vm_find_or_create_class_by_id(ID id,
5832  rb_num_t flags,
5833  VALUE cbase,
5834  VALUE super)
5835 {
5836  rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
5837 
5838  switch (type) {
5839  case VM_DEFINECLASS_TYPE_CLASS:
5840  /* classdef returns class scope value */
5841  return vm_define_class(id, flags, cbase, super);
5842 
5843  case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
5844  /* classdef returns class scope value */
5845  return rb_singleton_class(cbase);
5846 
5847  case VM_DEFINECLASS_TYPE_MODULE:
5848  /* classdef returns class scope value */
5849  return vm_define_module(id, flags, cbase);
5850 
5851  default:
5852  rb_bug("unknown defineclass type: %d", (int)type);
5853  }
5854 }
5855 
5856 static rb_method_visibility_t
5857 vm_scope_visibility_get(const rb_execution_context_t *ec)
5858 {
5859  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5860 
5861  if (!vm_env_cref_by_cref(cfp->ep)) {
5862  return METHOD_VISI_PUBLIC;
5863  }
5864  else {
5865  return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
5866  }
5867 }
5868 
5869 static int
5870 vm_scope_module_func_check(const rb_execution_context_t *ec)
5871 {
5872  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
5873 
5874  if (!vm_env_cref_by_cref(cfp->ep)) {
5875  return FALSE;
5876  }
5877  else {
5878  return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
5879  }
5880 }
5881 
5882 static void
5883 vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
5884 {
5885  VALUE klass;
5886  rb_method_visibility_t visi;
5887  rb_cref_t *cref = vm_ec_cref(ec);
5888 
5889  if (is_singleton) {
5890  klass = rb_singleton_class(obj); /* class and frozen checked in this API */
5891  visi = METHOD_VISI_PUBLIC;
5892  }
5893  else {
5894  klass = CREF_CLASS_FOR_DEFINITION(cref);
5895  visi = vm_scope_visibility_get(ec);
5896  }
5897 
5898  if (NIL_P(klass)) {
5899  rb_raise(rb_eTypeError, "no class/module to add method");
5900  }
5901 
5902  rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
5903  // Set max_iv_count on the class based on the number of ivar sets in its initialize method
5904  if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
5905 
5906  RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
5907  }
5908 
5909  if (!is_singleton && vm_scope_module_func_check(ec)) {
5910  klass = rb_singleton_class(klass);
5911  rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
5912  }
5913 }
5914 
5915 static VALUE
5916 vm_invokeblock_i(struct rb_execution_context_struct *ec,
5917  struct rb_control_frame_struct *reg_cfp,
5918  struct rb_calling_info *calling)
5919 {
5920  const struct rb_callinfo *ci = calling->cd->ci;
5921  VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
5922 
5923  if (block_handler == VM_BLOCK_HANDLER_NONE) {
5924  rb_vm_localjump_error("no block given (yield)", Qnil, 0);
5925  }
5926  else {
5927  return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
5928  }
5929 }
5930 
5931 enum method_explorer_type {
5932  mexp_search_method,
5933  mexp_search_invokeblock,
5934  mexp_search_super,
5935 };
5936 
5937 static inline VALUE
5938 vm_sendish(
5939  struct rb_execution_context_struct *ec,
5940  struct rb_control_frame_struct *reg_cfp,
5941  struct rb_call_data *cd,
5942  VALUE block_handler,
5943  enum method_explorer_type method_explorer
5944 ) {
5945  VALUE val = Qundef;
5946  const struct rb_callinfo *ci = cd->ci;
5947  const struct rb_callcache *cc;
5948  int argc = vm_ci_argc(ci);
5949  VALUE recv = TOPN(argc);
5950  struct rb_calling_info calling = {
5951  .block_handler = block_handler,
5952  .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
5953  .recv = recv,
5954  .argc = argc,
5955  .cd = cd,
5956  };
5957 
5958  switch (method_explorer) {
5959  case mexp_search_method:
5960  calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
5961  val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5962  break;
5963  case mexp_search_super:
5964  calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
5965  val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
5966  break;
5967  case mexp_search_invokeblock:
5968  val = vm_invokeblock_i(ec, GET_CFP(), &calling);
5969  break;
5970  }
5971  return val;
5972 }
5973 
5974 VALUE
5975 rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
5976 {
5977  stack_check(ec);
5978 
5979  struct rb_forwarding_call_data adjusted_cd;
5980  struct rb_callinfo adjusted_ci;
5981 
5982  VALUE bh;
5983  VALUE val;
5984 
5985  if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
5986  bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
5987 
5988  val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
5989 
5990  if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
5991  RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
5992  }
5993  }
5994  else {
5995  bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
5996  val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
5997  }
5998 
5999  VM_EXEC(ec, val);
6000  return val;
6001 }
6002 
6003 VALUE
6004 rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6005 {
6006  stack_check(ec);
6007  VALUE bh = VM_BLOCK_HANDLER_NONE;
6008  VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
6009  VM_EXEC(ec, val);
6010  return val;
6011 }
6012 
6013 VALUE
6014 rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
6015 {
6016  stack_check(ec);
6017  struct rb_forwarding_call_data adjusted_cd;
6018  struct rb_callinfo adjusted_ci;
6019 
6020  VALUE bh;
6021  VALUE val;
6022 
6023  if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
6024  bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
6025 
6026  val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
6027 
6028  if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
6029  RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
6030  }
6031  }
6032  else {
6033  bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
6034  val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
6035  }
6036 
6037  VM_EXEC(ec, val);
6038  return val;
6039 }
6040 
6041 VALUE
6042 rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
6043 {
6044  stack_check(ec);
6045  VALUE bh = VM_BLOCK_HANDLER_NONE;
6046  VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
6047  VM_EXEC(ec, val);
6048  return val;
6049 }
6050 
6051 /* object.c */
6052 VALUE rb_nil_to_s(VALUE);
6053 VALUE rb_true_to_s(VALUE);
6054 VALUE rb_false_to_s(VALUE);
6055 /* numeric.c */
6056 VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
6057 VALUE rb_fix_to_s(VALUE);
6058 /* variable.c */
6059 VALUE rb_mod_to_s(VALUE);
6060 VALUE rb_mod_name(VALUE);
6061 
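/* objtostring: fast path for string interpolation, e.g. "#{x}"
 * (illustrative). When the receiver's #to_s is the known C function for its
 * type it is called directly; returning Qundef makes the instruction fall
 * back to ordinary method dispatch. */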
6062 static VALUE
6063 vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
6064 {
6065  int type = TYPE(recv);
6066  if (type == T_STRING) {
6067  return recv;
6068  }
6069 
6070  const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
6071 
6072  switch (type) {
6073  case T_SYMBOL:
6074  if (check_cfunc(vm_cc_cme(cc), rb_sym_to_s)) {
6075  // rb_sym_to_s() allocates a mutable string, but since we are only
6076  // going to use this string for interpolation, it's fine to use the
6077  // frozen string.
6078  return rb_sym2str(recv);
6079  }
6080  break;
6081  case T_MODULE:
6082  case T_CLASS:
6083  if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
6084  // rb_mod_to_s() allocates a mutable string, but since we are only
6085  // going to use this string for interpolation, it's fine to use the
6086  // frozen string.
6087  VALUE val = rb_mod_name(recv);
6088  if (NIL_P(val)) {
6089  val = rb_mod_to_s(recv);
6090  }
6091  return val;
6092  }
6093  break;
6094  case T_NIL:
6095  if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
6096  return rb_nil_to_s(recv);
6097  }
6098  break;
6099  case T_TRUE:
6100  if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
6101  return rb_true_to_s(recv);
6102  }
6103  break;
6104  case T_FALSE:
6105  if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
6106  return rb_false_to_s(recv);
6107  }
6108  break;
6109  case T_FIXNUM:
6110  if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
6111  return rb_fix_to_s(recv);
6112  }
6113  break;
6114  }
6115  return Qundef;
6116 }
6117 
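/* opt_*_freeze: while #freeze is unredefined for the respective class,
 * `"str".freeze` (and the array/hash literal forms) simply return the
 * instruction's already-frozen literal operand, skipping dispatch
 * (illustrative). */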
6118 static VALUE
6119 vm_opt_ary_freeze(VALUE ary, int bop, ID id)
6120 {
6121  if (BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6122  return ary;
6123  }
6124  else {
6125  return Qundef;
6126  }
6127 }
6128 
6129 static VALUE
6130 vm_opt_hash_freeze(VALUE hash, int bop, ID id)
6131 {
6132  if (BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6133  return hash;
6134  }
6135  else {
6136  return Qundef;
6137  }
6138 }
6139 
6140 static VALUE
6141 vm_opt_str_freeze(VALUE str, int bop, ID id)
6142 {
6143  if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6144  return str;
6145  }
6146  else {
6147  return Qundef;
6148  }
6149 }
6150 
6151 /* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
6152 #define id_cmp idCmp
6153 
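/* opt_newarray_send (max/min/hash/pack below): `[a, b].max` and friends are
 * evaluated without materializing the temporary array while the respective
 * Array method is unredefined; candidates are read directly from the VM
 * stack via `ptr` (illustrative). */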
6154 static VALUE
6155 vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6156 {
6157  if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
6158  if (num == 0) {
6159  return Qnil;
6160  }
6161  else {
6162  VALUE result = *ptr;
6163  rb_snum_t i = num - 1;
6164  while (i-- > 0) {
6165  const VALUE v = *++ptr;
6166  if (OPTIMIZED_CMP(v, result) > 0) {
6167  result = v;
6168  }
6169  }
6170  return result;
6171  }
6172  }
6173  else {
6174  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
6175  }
6176 }
6177 
6178 VALUE
6179 rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6180 {
6181  return vm_opt_newarray_max(ec, num, ptr);
6182 }
6183 
6184 static VALUE
6185 vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6186 {
6187  if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
6188  if (num == 0) {
6189  return Qnil;
6190  }
6191  else {
6192  VALUE result = *ptr;
6193  rb_snum_t i = num - 1;
6194  while (i-- > 0) {
6195  const VALUE v = *++ptr;
6196  if (OPTIMIZED_CMP(v, result) < 0) {
6197  result = v;
6198  }
6199  }
6200  return result;
6201  }
6202  }
6203  else {
6204  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
6205  }
6206 }
6207 
6208 VALUE
6209 rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6210 {
6211  return vm_opt_newarray_min(ec, num, ptr);
6212 }
6213 
6214 static VALUE
6215 vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6216 {
6217  // If Array#hash is _not_ monkeypatched, use the optimized call
6218  if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
6219  return rb_ary_hash_values(num, ptr);
6220  }
6221  else {
6222  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
6223  }
6224 }
6225 
6226 VALUE
6227 rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
6228 {
6229  return vm_opt_newarray_hash(ec, num, ptr);
6230 }
6231 
6232 VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len);
6233 VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
6234 
6235 static VALUE
6236 vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6237 {
6238  if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
6239  struct RArray fake_ary;
6240  VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num);
6241  return rb_ec_pack_ary(ec, ary, fmt, (UNDEF_P(buffer) ? Qnil : buffer));
6242  }
6243  else {
6244  // The opt_newarray_send insn drops the keyword args, so we need to rebuild them.
6245  // Set up an argv array with room for the keyword hash.
6246  VALUE args[2];
6247  args[0] = fmt;
6248  int kw_splat = RB_NO_KEYWORDS;
6249  int argc = 1;
6250 
6251  if (!UNDEF_P(buffer)) {
6252  args[1] = rb_hash_new_with_size(1);
6253  rb_hash_aset(args[1], ID2SYM(idBuffer), buffer);
6254  kw_splat = RB_PASS_KEYWORDS;
6255  argc++;
6256  }
6257 
6258  return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, argc, args, kw_splat);
6259  }
6260 }
6261 
6262 VALUE
6263 rb_vm_opt_newarray_pack_buffer(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt, VALUE buffer)
6264 {
6265  return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, buffer);
6266 }
6267 
6268 VALUE
6269 rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
6270 {
6271  return vm_opt_newarray_pack_buffer(ec, num, ptr, fmt, Qundef);
6272 }
6273 
6274 #undef id_cmp
6275 
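/* Registers `ic` under every constant name it depends on, so a later write
 * such as `Foo = 1` (or to any segment of Foo::Bar) can consult
 * GET_VM()->constant_cache and invalidate each dependent inline cache
 * (illustrative description). */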
6276 static void
6277 vm_track_constant_cache(ID id, void *ic)
6278 {
6279  struct rb_id_table *const_cache = GET_VM()->constant_cache;
6280  VALUE lookup_result;
6281  st_table *ics;
6282 
6283  if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
6284  ics = (st_table *)lookup_result;
6285  }
6286  else {
6287  ics = st_init_numtable();
6288  rb_id_table_insert(const_cache, id, (VALUE)ics);
6289  }
6290 
6291  st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
6292 }
6293 
6294 static void
6295 vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
6296 {
6297  RB_VM_LOCK_ENTER();
6298 
6299  for (int i = 0; segments[i]; i++) {
6300  ID id = segments[i];
6301  if (id == idNULL) continue;
6302  vm_track_constant_cache(id, ic);
6303  }
6304 
6305  RB_VM_LOCK_LEAVE();
6306 }
6307 
6308 // For RJIT inlining
6309 static inline bool
6310 vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
6311 {
6312  if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
6313  VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));
6314 
6315  return (ic_cref == NULL || // no need to check CREF
6316  ic_cref == vm_get_cref(reg_ep));
6317  }
6318  return false;
6319 }
6320 
6321 static bool
6322 vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
6323 {
6324  VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
6325  return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
6326 }
6327 
6328 // YJIT needs this function to never allocate and never raise
6329 bool
6330 rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
6331 {
6332  return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
6333 }
6334 
6335 static void
6336 vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
6337 {
6338  if (ruby_vm_const_missing_count > 0) {
6339  ruby_vm_const_missing_count = 0;
6340  ic->entry = NULL;
6341  return;
6342  }
6343 
6344  struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
6345  RB_OBJ_WRITE(ice, &ice->value, val);
6346  ice->ic_cref = vm_get_const_key_cref(reg_ep);
6347  if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
6348  RB_OBJ_WRITE(iseq, &ic->entry, ice);
6349 
6350  RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
6351  unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
6352  rb_yjit_constant_ic_update(iseq, ic, pos);
6353  rb_rjit_constant_ic_update(iseq, ic, pos);
6354 }
6355 
6356 VALUE
6357 rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
6358 {
6359  VALUE val;
6360  const ID *segments = ic->segments;
6361  struct iseq_inline_constant_cache_entry *ice = ic->entry;
6362  if (ice && vm_ic_hit_p(ice, GET_EP())) {
6363  val = ice->value;
6364 
6365  VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
6366  }
6367  else {
6368  ruby_vm_constant_cache_misses++;
6369  val = vm_get_ev_const_chain(ec, segments);
6370  vm_ic_track_const_chain(GET_CFP(), ic, segments);
6371  // Undo the PC increment to get the address of this instruction
6372  // INSN_ATTR(width) == 2
6373  vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
6374  }
6375  return val;
6376 }
6377 
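/* The `once` insn evaluates its body at most once per iseq and caches the
 * result, e.g. for /#{pat}/o regexp literals (illustrative).
 * running_thread arbitrates races: NULL means not yet run, the sentinel
 * below means done, and any other thread means "wait and retry". */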
6378 static VALUE
6379 vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
6380 {
6381  rb_thread_t *th = rb_ec_thread_ptr(ec);
6382  rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
6383 
6384  again:
6385  if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
6386  return is->once.value;
6387  }
6388  else if (is->once.running_thread == NULL) {
6389  VALUE val;
6390  is->once.running_thread = th;
6391  val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
6392  RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
6393  /* is->once.running_thread is cleared by vm_once_clear() */
6394  is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
6395  return val;
6396  }
6397  else if (is->once.running_thread == th) {
6398  /* recursive once */
6399  return vm_once_exec((VALUE)iseq);
6400  }
6401  else {
6402  /* waiting for finish */
6403  RUBY_VM_CHECK_INTS(ec);
6404  rb_thread_schedule();
6405  goto again;
6406  }
6407 }
6408 
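/* opt_case_dispatch: O(1) `case/when` over literal patterns via a CDHASH
 * (illustrative). Integral Floats are normalized to Integers first so that
 * e.g. `case 2.0; when 2` still hits its entry; returning 0 falls back to
 * sequential checkmatch tests. */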
6409 static OFFSET
6410 vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
6411 {
6412  switch (OBJ_BUILTIN_TYPE(key)) {
6413  case -1:
6414  case T_FLOAT:
6415  case T_SYMBOL:
6416  case T_BIGNUM:
6417  case T_STRING:
6418  if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
6419  SYMBOL_REDEFINED_OP_FLAG |
6420  INTEGER_REDEFINED_OP_FLAG |
6421  FLOAT_REDEFINED_OP_FLAG |
6422  NIL_REDEFINED_OP_FLAG |
6423  TRUE_REDEFINED_OP_FLAG |
6424  FALSE_REDEFINED_OP_FLAG |
6425  STRING_REDEFINED_OP_FLAG)) {
6426  st_data_t val;
6427  if (RB_FLOAT_TYPE_P(key)) {
6428  double kval = RFLOAT_VALUE(key);
6429  if (!isinf(kval) && modf(kval, &kval) == 0.0) {
6430  key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
6431  }
6432  }
6433  if (rb_hash_stlike_lookup(hash, key, &val)) {
6434  return FIX2LONG((VALUE)val);
6435  }
6436  else {
6437  return else_offset;
6438  }
6439  }
6440  }
6441  return 0;
6442 }
6443 
6444 NORETURN(static void
6445  vm_stack_consistency_error(const rb_execution_context_t *ec,
6446  const rb_control_frame_t *,
6447  const VALUE *));
6448 static void
6449 vm_stack_consistency_error(const rb_execution_context_t *ec,
6450  const rb_control_frame_t *cfp,
6451  const VALUE *bp)
6452 {
6453  const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
6454  const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
6455  static const char stack_consistency_error[] =
6456  "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
6457 #if defined RUBY_DEVEL
6458  VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
6459  rb_str_cat_cstr(mesg, "\n");
6460  rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
6461  rb_exc_fatal(rb_exc_new3(rb_eFatal, mesg));
6462 #else
6463  rb_bug(stack_consistency_error, nsp, nbp);
6464 #endif
6465 }
6466 
6467 static VALUE
6468 vm_opt_plus(VALUE recv, VALUE obj)
6469 {
6470  if (FIXNUM_2_P(recv, obj) &&
6471  BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
6472  return rb_fix_plus_fix(recv, obj);
6473  }
6474  else if (FLONUM_2_P(recv, obj) &&
6475  BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6476  return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6477  }
6478  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6479  return Qundef;
6480  }
6481  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6482  RBASIC_CLASS(obj) == rb_cFloat &&
6483  BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
6484  return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
6485  }
6486  else if (RBASIC_CLASS(recv) == rb_cString &&
6487  RBASIC_CLASS(obj) == rb_cString &&
6488  BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
6489  return rb_str_opt_plus(recv, obj);
6490  }
6491  else if (RBASIC_CLASS(recv) == rb_cArray &&
6492  RBASIC_CLASS(obj) == rb_cArray &&
6493  BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
6494  return rb_ary_plus(recv, obj);
6495  }
6496  else {
6497  return Qundef;
6498  }
6499 }
6500 
6501 static VALUE
6502 vm_opt_minus(VALUE recv, VALUE obj)
6503 {
6504  if (FIXNUM_2_P(recv, obj) &&
6505  BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
6506  return rb_fix_minus_fix(recv, obj);
6507  }
6508  else if (FLONUM_2_P(recv, obj) &&
6509  BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6510  return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6511  }
6512  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6513  return Qundef;
6514  }
6515  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6516  RBASIC_CLASS(obj) == rb_cFloat &&
6517  BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
6518  return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
6519  }
6520  else {
6521  return Qundef;
6522  }
6523 }
6524 
6525 static VALUE
6526 vm_opt_mult(VALUE recv, VALUE obj)
6527 {
6528  if (FIXNUM_2_P(recv, obj) &&
6529  BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
6530  return rb_fix_mul_fix(recv, obj);
6531  }
6532  else if (FLONUM_2_P(recv, obj) &&
6533  BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6534  return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6535  }
6536  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6537  return Qundef;
6538  }
6539  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6540  RBASIC_CLASS(obj) == rb_cFloat &&
6541  BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
6542  return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
6543  }
6544  else {
6545  return Qundef;
6546  }
6547 }
6548 
6549 static VALUE
6550 vm_opt_div(VALUE recv, VALUE obj)
6551 {
6552  if (FIXNUM_2_P(recv, obj) &&
6553  BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
6554  return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
6555  }
6556  else if (FLONUM_2_P(recv, obj) &&
6557  BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6558  return rb_flo_div_flo(recv, obj);
6559  }
6560  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6561  return Qundef;
6562  }
6563  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6564  RBASIC_CLASS(obj) == rb_cFloat &&
6565  BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
6566  return rb_flo_div_flo(recv, obj);
6567  }
6568  else {
6569  return Qundef;
6570  }
6571 }
6572 
6573 static VALUE
6574 vm_opt_mod(VALUE recv, VALUE obj)
6575 {
6576  if (FIXNUM_2_P(recv, obj) &&
6577  BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
6578  return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
6579  }
6580  else if (FLONUM_2_P(recv, obj) &&
6581  BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6582  return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6583  }
6584  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6585  return Qundef;
6586  }
6587  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6588  RBASIC_CLASS(obj) == rb_cFloat &&
6589  BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
6590  return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
6591  }
6592  else {
6593  return Qundef;
6594  }
6595 }
6596 
6597 static VALUE
6598 vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
6599 {
6600  if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
6601  VALUE val = opt_equality(iseq, recv, obj, cd_eq);
6602 
6603  if (!UNDEF_P(val)) {
6604  return RBOOL(!RTEST(val));
6605  }
6606  }
6607 
6608  return Qundef;
6609 }
6610 
6611 static VALUE
6612 vm_opt_lt(VALUE recv, VALUE obj)
6613 {
6614  if (FIXNUM_2_P(recv, obj) &&
6615  BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
6616  return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
6617  }
6618  else if (FLONUM_2_P(recv, obj) &&
6619  BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6620  return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6621  }
6622  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6623  return Qundef;
6624  }
6625  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6626  RBASIC_CLASS(obj) == rb_cFloat &&
6627  BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
6628  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6629  return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
6630  }
6631  else {
6632  return Qundef;
6633  }
6634 }
6635 
6636 static VALUE
6637 vm_opt_le(VALUE recv, VALUE obj)
6638 {
6639  if (FIXNUM_2_P(recv, obj) &&
6640  BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
6641  return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
6642  }
6643  else if (FLONUM_2_P(recv, obj) &&
6644  BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6645  return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6646  }
6647  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6648  return Qundef;
6649  }
6650  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6651  RBASIC_CLASS(obj) == rb_cFloat &&
6652  BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
6653  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6654  return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
6655  }
6656  else {
6657  return Qundef;
6658  }
6659 }
6660 
6661 static VALUE
6662 vm_opt_gt(VALUE recv, VALUE obj)
6663 {
6664  if (FIXNUM_2_P(recv, obj) &&
6665  BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
6666  return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
6667  }
6668  else if (FLONUM_2_P(recv, obj) &&
6669  BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6670  return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6671  }
6672  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6673  return Qundef;
6674  }
6675  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6676  RBASIC_CLASS(obj) == rb_cFloat &&
6677  BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
6678  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6679  return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
6680  }
6681  else {
6682  return Qundef;
6683  }
6684 }
6685 
6686 static VALUE
6687 vm_opt_ge(VALUE recv, VALUE obj)
6688 {
6689  if (FIXNUM_2_P(recv, obj) &&
6690  BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
6691  return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
6692  }
6693  else if (FLONUM_2_P(recv, obj) &&
6694  BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6695  return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6696  }
6697  else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
6698  return Qundef;
6699  }
6700  else if (RBASIC_CLASS(recv) == rb_cFloat &&
6701  RBASIC_CLASS(obj) == rb_cFloat &&
6702  BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
6703  CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
6704  return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
6705  }
6706  else {
6707  return Qundef;
6708  }
6709 }
6710 
6711 
6712 static VALUE
6713 vm_opt_ltlt(VALUE recv, VALUE obj)
6714 {
6715  if (SPECIAL_CONST_P(recv)) {
6716  return Qundef;
6717  }
6718  else if (RBASIC_CLASS(recv) == rb_cString &&
6719  BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
6720  if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
6721  return rb_str_buf_append(recv, obj);
6722  }
6723  else {
6724  return rb_str_concat(recv, obj);
6725  }
6726  }
6727  else if (RBASIC_CLASS(recv) == rb_cArray &&
6728  BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
6729  return rb_ary_push(recv, obj);
6730  }
6731  else {
6732  return Qundef;
6733  }
6734 }
6735 
6736 static VALUE
6737 vm_opt_and(VALUE recv, VALUE obj)
6738 {
6739  // If recv and obj are both fixnums, then the bottom tag bit
6740  // will be 1 on both. 1 & 1 == 1, so the result value will also
6741  // be a fixnum. If either side is *not* a fixnum, then the tag bit
6742  // will be 0, and we return Qundef.
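    // Worked example (illustrative): recv = INT2FIX(6) = 0b1101 and
    // obj = INT2FIX(3) = 0b0111 give 0b1101 & 0b0111 = 0b0101 = INT2FIX(2),
    // matching 6 & 3 == 2; the tag bit survives the AND automatically.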
6743  VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);
6744 
6745  if (FIXNUM_P(ret) &&
6746  BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
6747  return ret;
6748  }
6749  else {
6750  return Qundef;
6751  }
6752 }
6753 
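/* Unlike AND above, OR cannot infer "both Fixnum" from the result's tag bit
 * (1|0 == 1), hence the explicit FIXNUM_2_P check. For two Fixnums,
 * INT2FIX(a) | INT2FIX(b) == INT2FIX(a | b), so the tagged values can be
 * OR'ed directly (illustrative). */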
6754 static VALUE
6755 vm_opt_or(VALUE recv, VALUE obj)
6756 {
6757  if (FIXNUM_2_P(recv, obj) &&
6758  BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
6759  return recv | obj;
6760  }
6761  else {
6762  return Qundef;
6763  }
6764 }
6765 
6766 static VALUE
6767 vm_opt_aref(VALUE recv, VALUE obj)
6768 {
6769  if (SPECIAL_CONST_P(recv)) {
6770  if (FIXNUM_2_P(recv, obj) &&
6771  BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
6772  return rb_fix_aref(recv, obj);
6773  }
6774  return Qundef;
6775  }
6776  else if (RBASIC_CLASS(recv) == rb_cArray &&
6777  BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
6778  if (FIXNUM_P(obj)) {
6779  return rb_ary_entry_internal(recv, FIX2LONG(obj));
6780  }
6781  else {
6782  return rb_ary_aref1(recv, obj);
6783  }
6784  }
6785  else if (RBASIC_CLASS(recv) == rb_cHash &&
6786  BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
6787  return rb_hash_aref(recv, obj);
6788  }
6789  else {
6790  return Qundef;
6791  }
6792 }
6793 
6794 static VALUE
6795 vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
6796 {
6797  if (SPECIAL_CONST_P(recv)) {
6798  return Qundef;
6799  }
6800  else if (RBASIC_CLASS(recv) == rb_cArray &&
6801  BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
6802  FIXNUM_P(obj)) {
6803  rb_ary_store(recv, FIX2LONG(obj), set);
6804  return set;
6805  }
6806  else if (RBASIC_CLASS(recv) == rb_cHash &&
6807  BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
6808  rb_hash_aset(recv, obj, set);
6809  return set;
6810  }
6811  else {
6812  return Qundef;
6813  }
6814 }
6815 
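/* opt_aref_with: `hash["literal"]` using the frozen string literal as the
 * key without duping it; restricted to plain Hashes (no compare_by_identity,
 * no default proc) since those paths could retain or re-enter with the
 * un-duped key (illustrative). */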
6816 static VALUE
6817 vm_opt_aref_with(VALUE recv, VALUE key)
6818 {
6819  if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6820  BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
6821  rb_hash_compare_by_id_p(recv) == Qfalse &&
6822  !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
6823  return rb_hash_aref(recv, key);
6824  }
6825  else {
6826  return Qundef;
6827  }
6828 }
6829 
6830 VALUE
6831 rb_vm_opt_aref_with(VALUE recv, VALUE key)
6832 {
6833  return vm_opt_aref_with(recv, key);
6834 }
6835 
6836 static VALUE
6837 vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
6838 {
6839  if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
6840  BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
6841  rb_hash_compare_by_id_p(recv) == Qfalse) {
6842  return rb_hash_aset(recv, key, val);
6843  }
6844  else {
6845  return Qundef;
6846  }
6847 }
6848 
6849 static VALUE
6850 vm_opt_length(VALUE recv, int bop)
6851 {
6852  if (SPECIAL_CONST_P(recv)) {
6853  return Qundef;
6854  }
6855  else if (RBASIC_CLASS(recv) == rb_cString &&
6856  BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
6857  if (bop == BOP_EMPTY_P) {
6858  return LONG2NUM(RSTRING_LEN(recv));
6859  }
6860  else {
6861  return rb_str_length(recv);
6862  }
6863  }
6864  else if (RBASIC_CLASS(recv) == rb_cArray &&
6865  BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
6866  return LONG2NUM(RARRAY_LEN(recv));
6867  }
6868  else if (RBASIC_CLASS(recv) == rb_cHash &&
6869  BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
6870  return INT2FIX(RHASH_SIZE(recv));
6871  }
6872  else {
6873  return Qundef;
6874  }
6875 }
6876 
6877 static VALUE
6878 vm_opt_empty_p(VALUE recv)
6879 {
6880  switch (vm_opt_length(recv, BOP_EMPTY_P)) {
6881  case Qundef: return Qundef;
6882  case INT2FIX(0): return Qtrue;
6883  default: return Qfalse;
6884  }
6885 }
6886 
6887 VALUE rb_false(VALUE obj);
6888 
6889 static VALUE
6890 vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6891 {
6892  if (NIL_P(recv) &&
6893  BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
6894  return Qtrue;
6895  }
6896  else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
6897  return Qfalse;
6898  }
6899  else {
6900  return Qundef;
6901  }
6902 }
6903 
6904 static VALUE
6905 fix_succ(VALUE x)
6906 {
6907  switch (x) {
6908  case ~0UL:
6909  /* 0xFFFF_FFFF == INT2FIX(-1)
6910  * `-1.succ` is of course 0. */
6911  return INT2FIX(0);
6912  case RSHIFT(~0UL, 1):
6913  /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
6914  * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
6915  return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
6916  default:
6917  /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
6918  * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
6919  * == lx*2 + ly*2 + 1
6920  * == (lx*2+1) + (ly*2+1) - 1
6921  * == x + y - 1
6922  *
6923  * Here, if we put y := INT2FIX(1):
6924  *
6925  * == x + INT2FIX(1) - 1
6926  * == x + 2 .
6927  */
6928  return x + 2;
6929  }
6930 }
6931 
6932 static VALUE
6933 vm_opt_succ(VALUE recv)
6934 {
6935  if (FIXNUM_P(recv) &&
6936  BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
6937  return fix_succ(recv);
6938  }
6939  else if (SPECIAL_CONST_P(recv)) {
6940  return Qundef;
6941  }
6942  else if (RBASIC_CLASS(recv) == rb_cString &&
6943  BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
6944  return rb_str_succ(recv);
6945  }
6946  else {
6947  return Qundef;
6948  }
6949 }
6950 
6951 static VALUE
6952 vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
6953 {
6954  if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
6955  return RBOOL(!RTEST(recv));
6956  }
6957  else {
6958  return Qundef;
6959  }
6960 }
6961 
6962 static VALUE
6963 vm_opt_regexpmatch2(VALUE recv, VALUE obj)
6964 {
6965  if (SPECIAL_CONST_P(recv)) {
6966  return Qundef;
6967  }
6968  else if (RBASIC_CLASS(recv) == rb_cString &&
6969  CLASS_OF(obj) == rb_cRegexp &&
6970  BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
6971  return rb_reg_match(obj, recv);
6972  }
6973  else if (RBASIC_CLASS(recv) == rb_cRegexp &&
6974  BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
6975  return rb_reg_match(recv, obj);
6976  }
6977  else {
6978  return Qundef;
6979  }
6980 }
6981 
6982 rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
6983 
6984 NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
6985 
6986 static inline void
6987 vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
6988  rb_event_flag_t pc_events, rb_event_flag_t target_event,
6989  rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
6990 {
6991  rb_event_flag_t event = pc_events & target_event;
6992  VALUE self = GET_SELF();
6993 
6994  VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
6995 
6996  if (event & global_hooks->events) {
6997  /* increment PC because source line is calculated with PC-1 */
6998  reg_cfp->pc++;
6999  vm_dtrace(event, ec);
7000  rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
7001  reg_cfp->pc--;
7002  }
7003 
7004  // Load here since the global hook above can add or free local hooks
7005  rb_hook_list_t *local_hooks = *local_hooks_ptr;
7006  if (local_hooks != NULL) {
7007  if (event & local_hooks->events) {
7008  /* increment PC because source line is calculated with PC-1 */
7009  reg_cfp->pc++;
7010  rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
7011  reg_cfp->pc--;
7012  }
7013  }
7014 }
7015 
7016 #define VM_TRACE_HOOK(target_event, val) do { \
7017  if ((pc_events & (target_event)) & enabled_flags) { \
7018  vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
7019  } \
7020 } while (0)
7021 
7022 static VALUE
7023 rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
7024 {
7025  VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
7026  VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
7027  return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
7028 }
7029 
7030 static void
7031 vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
7032 {
7033  const VALUE *pc = reg_cfp->pc;
7034  rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
7035  rb_event_flag_t global_events = enabled_flags;
7036 
7037  if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
7038  return;
7039  }
7040  else {
7041  const rb_iseq_t *iseq = reg_cfp->iseq;
7042  VALUE iseq_val = (VALUE)iseq;
7043  size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
7044  rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
7045  rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
7046  rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
7047  rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
7048  rb_hook_list_t *bmethod_local_hooks = NULL;
7049  rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
7050  rb_event_flag_t bmethod_local_events = 0;
7051  const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
7052  enabled_flags |= iseq_local_events;
7053 
7054  VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
7055 
7056  if (bmethod_frame) {
7057  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
7058  VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
7059  bmethod_local_hooks = me->def->body.bmethod.hooks;
7060  bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
7061  if (bmethod_local_hooks) {
7062  bmethod_local_events = bmethod_local_hooks->events;
7063  }
7064  }
7065 
7066 
7067  if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
7068 #if 0
7069  /* disable trace */
7070  /* TODO: incomplete */
7071  rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
7072 #else
7073  /* do not disable tracing here because of the performance problem
7074  * (re-enabling overhead)
7075  */
7076 #endif
7077  return;
7078  }
7079  else if (ec->trace_arg != NULL) {
7080  /* already tracing */
7081  return;
7082  }
7083  else {
7084  rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
7085  /* Note, not considering iseq local events here since the same
7086  * iseq could be used in multiple bmethods. */
7087  rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
7088 
7089  if (0) {
7090  ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
7091  (int)pos,
7092  (int)pc_events,
7093  RSTRING_PTR(rb_iseq_path(iseq)),
7094  (int)rb_iseq_line_no(iseq, pos),
7095  RSTRING_PTR(rb_iseq_label(iseq)));
7096  }
7097  VM_ASSERT(reg_cfp->pc == pc);
7098  VM_ASSERT(pc_events != 0);
7099 
7100  /* check traces */
7101  if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
7102  /* b_call instruction running as a method. Fire call event. */
7103  vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
7104  }
7105  VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
7106  VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
7107  VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
7108  VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
7109  VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
7110  VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
7111  if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
7112  /* b_return instruction running as a method. Fire return event. */
7113  vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
7114  }
7115 
7116  // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
7117  // We need the pointer to stay valid in case compaction happens in a trace hook.
7118  //
7119  // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
7120  // storage for `rb_method_definition_t` is not on the GC heap.
7121  RB_GC_GUARD(iseq_val);
7122  }
7123  }
7124 }
7125 #undef VM_TRACE_HOOK
7126 
7127 #if VM_CHECK_MODE > 0
7128 NORETURN( NOINLINE( COLDFUNC
7129 void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
7130 
7131 void
7132 Init_vm_stack_canary(void)
7133 {
7134  /* This has to be called _after_ our PRNG is properly set up. */
7135  int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
7136  vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
7137 
7138  vm_stack_canary_was_born = true;
7139  VM_ASSERT(n == 0);
7140 }
7141 
7142 void
7143 rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
7144 {
7145  /* Because a method has already been called, why not call
7146  * another one. */
7147  const char *insn = rb_insns_name(i);
7148  VALUE inspection = rb_inspect(c);
7149  const char *str = StringValueCStr(inspection);
7150 
7151  rb_bug("dead canary found at %s: %s", insn, str);
7152 }
7153 
7154 #else
7155 void Init_vm_stack_canary(void) { /* nothing to do */ }
7156 #endif
7157 
7158 
7159 /* a part of the following code is generated by this ruby script:
7160 
7161 16.times{|i|
7162  typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
7163  typedef_args.prepend(", ") if i != 0
7164  call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
7165  call_args.prepend(", ") if i != 0
7166  puts %Q{
7167 static VALUE
7168 builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7169 {
7170  typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
7171  return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
7172 }}
7173 }
7174 
7175 puts
7176 puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
7177 16.times{|i|
7178  puts " builtin_invoker#{i},"
7179 }
7180 puts "};"
7181 */
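/* Why one invoker per arity: C has no portable way to call a function
 * pointer with an argument list whose length is only known at run time, so
 * a trampoline is generated for each arity 0..15 and selected by argc.  A
 * sketch of the dispatch for a hypothetical two-argument builtin
 * (`my_builtin` is illustrative, not a real function in this file):
 *
 *     VALUE my_builtin(rb_execution_context_t *ec, VALUE self, VALUE a, VALUE b);
 *     VALUE argv[2] = { INT2FIX(1), INT2FIX(2) };
 *     VALUE ret = builtin_invoker2(ec, self, argv, (rb_insn_func_t)my_builtin);
 *
 * builtin_invoker2 casts funcptr back to the two-argument signature and
 * spreads argv[0] and argv[1] into the call. */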
7182 
7183 static VALUE
7184 builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7185 {
7186  typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
7187  return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
7188 }
7189 
7190 static VALUE
7191 builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7192 {
7193  typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
7194  return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
7195 }
7196 
7197 static VALUE
7198 builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7199 {
7200  typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
7201  return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
7202 }
7203 
7204 static VALUE
7205 builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7206 {
7207  typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
7208  return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
7209 }
7210 
7211 static VALUE
7212 builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7213 {
7214  typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
7215  return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
7216 }
7217 
7218 static VALUE
7219 builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7220 {
7221  typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
7222  return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
7223 }
7224 
7225 static VALUE
7226 builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7227 {
7228  typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
7229  return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
7230 }
7231 
7232 static VALUE
7233 builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7234 {
7235  typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
7236  return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
7237 }
7238 
7239 static VALUE
7240 builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7241 {
7242  typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
7243  return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
7244 }
7245 
7246 static VALUE
7247 builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7248 {
7249  typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
7250  return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
7251 }
7252 
7253 static VALUE
7254 builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7255 {
7256  typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
7257  return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
7258 }
7259 
7260 static VALUE
7261 builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7262 {
7263  typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
7264  return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
7265 }
7266 
7267 static VALUE
7268 builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7269 {
7270  typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
7271  return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
7272 }
7273 
7274 static VALUE
7275 builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7276 {
7277  typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
7278  return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
7279 }
7280 
7281 static VALUE
7282 builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7283 {
7284  typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
7285  return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
7286 }
7287 
7288 static VALUE
7289 builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
7290 {
7291  typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
7292  return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
7293 }
7294 
7295 typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
7296 
7297 static builtin_invoker
7298 lookup_builtin_invoker(int argc)
7299 {
7300  static const builtin_invoker invokers[] = {
7301  builtin_invoker0,
7302  builtin_invoker1,
7303  builtin_invoker2,
7304  builtin_invoker3,
7305  builtin_invoker4,
7306  builtin_invoker5,
7307  builtin_invoker6,
7308  builtin_invoker7,
7309  builtin_invoker8,
7310  builtin_invoker9,
7311  builtin_invoker10,
7312  builtin_invoker11,
7313  builtin_invoker12,
7314  builtin_invoker13,
7315  builtin_invoker14,
7316  builtin_invoker15,
7317  };
7318 
7319  return invokers[argc];
7320 }
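/* Note (an assumption documented here, not enforced): `argc` must be in the
 * range 0..15, since the table above has exactly 16 entries and the index
 * is unchecked.  Builtin functions are registered from compiled iseqs,
 * which is presumably where the arity limit is guaranteed. */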
7321 
7322 static inline VALUE
7323 invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7324 {
7325  const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
7326  SETUP_CANARY(canary_p);
7327  rb_insn_func_t func_ptr = (rb_insn_func_t)(uintptr_t)bf->func_ptr;
7328  VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, func_ptr);
7329  CHECK_CANARY(canary_p, BIN(invokebuiltin));
7330  return ret;
7331 }
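/* The canary pair above is a VM_CHECK_MODE sanity check for
 * `Primitive.attr! :leaf`: a leaf builtin promises not to re-enter the VM,
 * so the stack slot written by SETUP_CANARY must still hold
 * vm_stack_canary afterwards; CHECK_CANARY otherwise reports the clobbered
 * value via rb_vm_canary_is_found_dead() (defined earlier in this file). */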
7332 
7333 static VALUE
7334 vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
7335 {
7336  return invoke_bf(ec, cfp, bf, argv);
7337 }
7338 
7339 static VALUE
7340 vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
7341 {
7342  if (0) { // debug print
7343  fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
7344  for (int i=0; i<bf->argc; i++) {
7345  ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
7346  }
7347  ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc,
7348  (void *)(uintptr_t)bf->func_ptr);
7349  }
7350 
7351  if (bf->argc == 0) {
7352  return invoke_bf(ec, cfp, bf, NULL);
7353  }
7354  else {
7355  const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
7356  return invoke_bf(ec, cfp, bf, argv);
7357  }
7358 }
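/* A sketch of the pointer arithmetic above, assuming VM_ENV_DATA_SIZE == 3
 * and a frame with local_table_size == 3 (locals lv0..lv2):
 *
 *     ep[-5] ep[-4] ep[-3] | ep[-2] ep[-1] ep[0]
 *      lv0    lv1    lv2   |     (env data)
 *
 * so `ep - 3 - 3 + 1` is &lv0, the first local; adding `start_index` skips
 * leading locals so that `argv` points at the first delegated argument. */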
7359 
7360 // for __builtin_inline!()
7361 
7362 VALUE
7363 rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
7364 {
7365  const rb_control_frame_t *cfp = ec->cfp;
7366  return cfp->ep[index];
7367 }
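/* Expected usage, hedged: C snippets spliced in with __builtin_inline!()
 * cannot name Ruby locals directly, so the builtin compiler is expected to
 * rewrite each local-variable reference into rb_vm_lvar_exposed(ec, index),
 * where `index` is the EP-relative slot of that local (negative, per the
 * frame layout sketched above). */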