Ruby 3.5.0dev (2025-08-02 revision 30a20bc166bc37acd7dcb3788686df149c7f428a)
vm_callinfo.h (30a20bc166bc37acd7dcb3788686df149c7f428a)
#ifndef RUBY_VM_CALLINFO_H /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // !(ci->flag & (SPLAT|BLOCKARG|KWARG|KW_SPLAT|FORWARDING)) && !has_block_iseq
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
    VM_CALL_FORWARDING_bit,     // m(...)
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT     (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG  (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL          (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL          (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE    (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG          (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT       (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL       (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER          (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER         (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND       (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT   (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
#define VM_CALL_ARGS_SPLAT_MUT (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
#define VM_CALL_FORWARDING     (0x01 << VM_CALL_FORWARDING_bit)

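/*
 * Usage sketch (illustrative, not part of this header): each VM_CALL_* macro
 * is a single bit, so a call site's properties are tested by masking the flag
 * word of its callinfo:
 *
 *   if (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) {
 *       // the call site looked like m(*args)
 *   }
 *   if ((vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL)) != 0) {
 *       // the receiver was implicit self
 *   }
 */
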
struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}
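
/*
 * Allocation sketch (illustrative): rb_callinfo_kwarg ends in a flexible
 * array member, so an instance holding N keyword symbols is one block of
 * rb_callinfo_kwarg_bytes(N) bytes, i.e.
 * sizeof(struct rb_callinfo_kwarg) + N * sizeof(VALUE), with the
 * multiply-add overflow-checked by rb_size_mul_add_or_raise. For example
 * (bookkeeping of `references` omitted):
 *
 *   struct rb_callinfo_kwarg *kw = ruby_xmalloc(rb_callinfo_kwarg_bytes(2));
 *   kw->keyword_len = 2;
 *   kw->keywords[0] = ID2SYM(rb_intern("a"));
 *   kw->keywords[1] = ID2SYM(rb_intern("b"));
 */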

// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error CI_EMBED_* bit widths must sum to the width of VALUE
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)

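/*
 * Layout sketch (illustrative) of a packed (embedded) callinfo with the
 * 64-bit VALUE widths above:
 *
 *   bit   0      : tag (always 1, like RUBY_FIXNUM_FLAG, so a packed ci can
 *                  never be mistaken for an aligned heap pointer)
 *   bits  1..15  : argc
 *   bits 16..31  : flag (VM_CALL_* bits)
 *   bits 32..63  : method ID
 */
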
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}
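
/*
 * Usage sketch (illustrative): the accessors above work uniformly on packed
 * and heap callinfo, so callers never branch on the representation. For
 * example, walking the keyword names of a call site:
 *
 *   const struct rb_callinfo_kwarg *kw = vm_ci_kwarg(ci);
 *   if (kw) {
 *       for (int i = 0; i < kw->keyword_len; i++) {
 *           VALUE sym = kw->keywords[i]; // keyword name as a Symbol
 *       }
 *   }
 */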

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false : \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
      (kwarg)                       ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
  ((const struct rb_callinfo *)                  \
   ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |    \
    (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |    \
    (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |    \
    RUBY_FIXNUM_FLAG))

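/*
 * Round-trip sketch (illustrative): packing an embeddable call site and
 * reading it back through the accessors, assuming `mid` fits in
 * CI_EMBED_ID_bits:
 *
 *   ID mid = rb_intern("puts");
 *   if (VM_CI_EMBEDDABLE_P(mid, VM_CALL_FCALL, 1, NULL)) {
 *       const struct rb_callinfo *ci = vm_ci_new_id(mid, VM_CALL_FCALL, 1, 0);
 *       VM_ASSERT(vm_ci_packed_p(ci));
 *       VM_ASSERT(vm_ci_mid(ci) == mid);
 *       VM_ASSERT(vm_ci_argc(ci) == 1);
 *   }
 */
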
// vm_method.c
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);

    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}
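
/*
 * Usage sketch (illustrative): callers go through the vm_ci_new macro above,
 * which forwards __FILE__/__LINE__ for debug output. A simple single-argument
 * call site could be created as:
 *
 *   const struct rb_callinfo *ci =
 *       vm_ci_new(rb_intern("m"), VM_CALL_FCALL | VM_CALL_ARGS_SIMPLE, 1, NULL);
 */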
static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }
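
/*
 * Usage sketch (illustrative): VM_CI_ON_STACK builds a temporary heap-layout
 * callinfo in a local variable, flagged VM_CALLINFO_NOT_UNDER_GC so the GC
 * never treats it as a managed object:
 *
 *   struct rb_callinfo ci = VM_CI_ON_STACK(rb_intern("m"), VM_CALL_FCALL, 0, NULL);
 *   unsigned int flag = vm_ci_flag(&ci); // the accessors work on it as usual
 */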

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // Weak reference. When klass is collected, `cc->klass = Qundef`.

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uint64_t value; // shape ID and attr index packed into one 64-bit word; see vm_pack_shape_and_index
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};

/* VM_CALLCACHE_IVAR is used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR       IMEMO_FL_USER0
#define VM_CALLCACHE_BF         IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER      IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3
#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER4
#define VM_CALLCACHE_ON_STACK   IMEMO_FL_USER5

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}

static inline VALUE
cc_check_class(VALUE klass)
{
    VM_ASSERT(klass == Qundef || RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
    return klass;
}

VALUE rb_vm_cc_table_create(size_t capa);
VALUE rb_vm_cc_table_dup(VALUE old_table);
void rb_vm_cc_table_delete(VALUE table, ID mid);

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    cc_check_class(klass);
    struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
    *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
    *((vm_call_handler *)&cc->call_) = call;

    VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        rb_vm_insert_cc_refinement(cc);
        break;
    }

    if (cme->def->type == VM_METHOD_TYPE_ATTRSET || cme->def->type == VM_METHOD_TYPE_IVAR) {
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    }

    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme)  \
    (struct rb_callcache) {                    \
        .flags = T_IMEMO |                     \
            (imemo_callcache << FL_USHIFT) |   \
            VM_CALLCACHE_UNMARKABLE |          \
            VM_CALLCACHE_ON_STACK,             \
        .klass = cc_check_class(clazz),        \
        .cme_  = cme,                          \
        .call_ = call,                         \
        .aux_  = aux,                          \
    }
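
/*
 * Usage sketch (illustrative): like VM_CI_ON_STACK, this builds a temporary
 * callcache in a local variable, marked VM_CALLCACHE_UNMARKABLE so the GC
 * ignores it. Here `klass` and `cme` stand for some valid class and callable
 * method entry, and vm_call_general names the VM's generic call handler:
 *
 *   struct rb_callcache cc = VM_CC_ON_STACK(klass, vm_call_general, {{ 0 }}, cme);
 */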

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc_check_class(cc->klass));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline bool
vm_cc_valid(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc_check_class(cc->klass));

    return !UNDEF_P(cc->klass);
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline void
vm_unpack_shape_and_index(const uint64_t cache_value, shape_id_t *shape_id, attr_index_t *index)
{
    union rb_attr_index_cache cache = {
        .pack = cache_value,
    };
    *shape_id = cache.unpack.shape_id;
    *index = cache.unpack.index - 1; // stored off by one so that 0 means "not set"
}

static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(cc->aux_.attr.value), shape_id, index);
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(ic->value), shape_id, index);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (vm_cc_valid(cc) && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}
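
/*
 * Usage sketch (illustrative): a cache hit is only trustworthy while the cc
 * is valid (its klass has not been collected or invalidated) and its method
 * entry has not been invalidated. A caller would typically re-search on
 * invalidation; `ec`, `cfp`, and `calling` stand for the usual locals:
 *
 *   if (!vm_cc_invalidated_p(cc) && vm_cc_class_check(cc, klass)) {
 *       return vm_cc_call(cc)(ec, cfp, calling); // fast path: cached handler
 *   }
 *   // slow path: redo method lookup and refill the cache
 */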

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}

static inline uint64_t
vm_pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
{
    union rb_attr_index_cache cache = {
        .unpack = {
            .shape_id = shape_id,
            .index = index + 1, // off-by-one encoding: ATTR_INDEX_NOT_SET packs to 0
        }
    };
    return cache.pack;
}
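
/*
 * Round-trip sketch (illustrative): packing and unpacking are inverses, so a
 * (shape_id, index) pair survives the trip through the 64-bit cache word.
 * Here `shape_id` stands for some valid shape ID:
 *
 *   shape_id_t sid;
 *   attr_index_t idx;
 *   vm_unpack_shape_and_index(vm_pack_shape_and_index(shape_id, 3), &sid, &idx);
 *   VM_ASSERT(sid == shape_id && idx == 3);
 */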

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uint64_t *attr_value = (uint64_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET);
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = vm_pack_shape_and_index(dest_shape_id, index);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(dest_shape_id, index));
}

static inline void
vm_ic_attr_index_initialize(struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(shape_id, ATTR_INDEX_NOT_SET));
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != Qundef); // must still be valid (not yet invalidated)

    *(VALUE *)&cc->klass = Qundef;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}

/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        unsigned int argc;
        unsigned int flag;
        const struct rb_callcache *cc;
    } entries[FLEX_ARY_LEN];
};

static inline size_t
vm_ccs_alloc_size(size_t capa)
{
    return offsetof(struct rb_class_cc_entries, entries) + (sizeof(struct rb_class_cc_entries_entry) * capa);
}
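
/*
 * Sizing sketch (illustrative): entries[] is a flexible-style array, so a ccs
 * with room for `capa` entries is a single allocation of
 * vm_ccs_alloc_size(capa) bytes, e.g.:
 *
 *   struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(4));
 *   ccs->capa = 4;
 *   ccs->len = 0;
 */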

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    bool valid;
    RB_VM_LOCKING() {
        valid = vm_cc_cme(cc) == cme ||
                (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme));
    }
    if (valid) {
        return true;
    }
#if 1
    // debug print
    fprintf(stderr, "iseq_overload:%d, cme:%p (def:%p), vm_cc_cme(cc):%p (def:%p)\n",
            (int)cme->def->iseq_overload,
            cme, cme->def,
            vm_cc_cme(cc), vm_cc_cme(cc)->def);
    rp(cme);
    rp(vm_cc_cme(cc));
    rp(rb_vm_lookup_overloaded_cme(cme));
#endif
    return false;
}

#endif

#endif /* RUBY_VM_CALLINFO_H */