Ruby 3.4.0dev (2024-11-05 revision e440268d51fe02b303e3817a7a733a0dac1c5091)
vm_callinfo.h (e440268d51fe02b303e3817a7a733a0dac1c5091)
#ifndef RUBY_VM_CALLINFO_H /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // !(ci->flag & (SPLAT|BLOCKARG|KWARG|KW_SPLAT|FORWARDING)) && !has_block_iseq
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
    VM_CALL_FORWARDING_bit,     // m(...)
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
#define VM_CALL_ARGS_SPLAT_MUT  (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
#define VM_CALL_FORWARDING      (0x01 << VM_CALL_FORWARDING_bit)

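/* Illustrative sketch (not part of the original header): each Ruby call
 * site compiles to a flag word built from the bits above. For example,
 * `obj.m(*args, &blk)` would carry
 *
 *     VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG
 *
 * and a dispatch path can branch on it with something like:
 *
 *     if (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) {
 *         // expand the splatted array onto the VM stack
 *     }
 */
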
struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}
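
/* Illustrative note (not in the original source): `keywords[]` is a
 * flexible array member, so a kwarg struct for N keywords occupies
 * sizeof(struct rb_callinfo_kwarg) + N * sizeof(VALUE) bytes. On a
 * 64-bit build that is 8 + 3*8 = 32 bytes for three keywords; the
 * overflow-checked multiply-add above computes exactly that. */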

// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
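
/* Illustrative layout (not in the original source), for SIZEOF_VALUE == 8:
 *
 *     bit 63 ......... 32 31 ......... 16 15 .......... 1  0
 *        |    mid (ID)    |     flag      |     argc     | 1 |
 *
 * The low tag bit doubles as RUBY_FIXNUM_FLAG, so a packed callinfo looks
 * like a Fixnum to the GC and is never marked. Decoding argc, for example,
 * is ((VALUE)ci >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK, which is
 * exactly what vm_ci_argc() below does in the packed case. */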

static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false : \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
     (kwarg)                        ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)    | \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT)  | \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT)  | \
      RUBY_FIXNUM_FLAG))
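
/* Usage sketch (not in the original source): a call site such as
 * `foo(1, 2)` with a small method ID can be encoded without allocation:
 *
 *     const struct rb_callinfo *ci = vm_ci_new(mid, 0, 2, NULL);
 *
 * When VM_CI_EMBEDDABLE_P(mid, 0, 2, NULL) holds, vm_ci_new_() below
 * packs everything into the pointer bits via vm_ci_new_id(). */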

// vm_method.c
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);

    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }
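
/* Usage sketch (not in the original source): code that needs a callinfo
 * only for the duration of one call can build it on the C stack; the
 * VM_CALLINFO_NOT_UNDER_GC flag tells vm_ci_markable() above to treat it
 * as off-limits to the GC:
 *
 *     struct rb_callinfo ci = VM_CI_ON_STACK(mid, VM_CALL_FCALL, 1, NULL);
 */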

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // Not marked on purpose: marking it would keep klass
                       // from ever being freed. When klass is collected, the
                       // cc is invalidated (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};

#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK   FL_EXIVAR

/* VM_CALLCACHE_IVAR is used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR       IMEMO_FL_USER0
#define VM_CALLCACHE_BF         IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER      IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
    *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
    *((vm_call_handler *)&cc->call_) = call;

    VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        break;
    }

    if (cme->def->type == VM_METHOD_TYPE_ATTRSET || cme->def->type == VM_METHOD_TYPE_IVAR) {
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    }

    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = clazz,                       \
        .cme_  = cme,                         \
        .call_ = call,                        \
        .aux_  = aux,                         \
    }

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}
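
/* Illustrative note (not in the original source): the attr cache packs
 * `(dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1)` into one word, so a
 * single load observes both halves consistently. Index 0 is reserved to
 * mean "no index cached": storing index 2 under shape 7 yields
 * (7 << SHAPE_FLAG_SHIFT) | 3, and vm_cc_attr_index() recovers 3 - 1 = 2. */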

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = ic->value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}

// For RJIT. cc_cme is expected to be vm_cc_cme(cc), inlined by the caller.
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
}

static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // should still be enabled (not invalidated yet)

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}

/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        unsigned int argc;
        unsigned int flag;
        const struct rb_callcache *cc;
    } *entries;
};
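
/* Illustrative note (not in the original source): each entry caches one
 * call cache per distinct (argc, flag) call shape for a single method, so
 * a lookup sketch over a ccs would be:
 *
 *     for (int i = 0; i < ccs->len; i++) {
 *         if (ccs->entries[i].argc == argc && ccs->entries[i].flag == flag)
 *             return ccs->entries[i].cc;
 *     }
 */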

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}
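
/* Illustrative note (not in the original source): debug_sig holds the
 * bitwise complement of the ccs's own address, stamped when the ccs is
 * created. A stale or corrupted pointer is vanishingly unlikely to satisfy
 * ccs->debug_sig == ~(VALUE)ccs, so the check above doubles as a cheap
 * sanity test in VM_CHECK_MODE builds. */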

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print

        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */