Ruby 4.1.0dev (2025-12-28 revision 3fe2ebf8e4127bca0a57d4ed8eb6035792420a26)
vm_callinfo.h (3fe2ebf8e4127bca0a57d4ed8eb6035792420a26)
#ifndef RUBY_VM_CALLINFO_H                               /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // !(ci->flag & (SPLAT|BLOCKARG|KWARG|KW_SPLAT|FORWARDING)) && !has_block_iseq
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
    VM_CALL_FORWARDING_bit,     // m(...)
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
#define VM_CALL_ARGS_SPLAT_MUT  (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
#define VM_CALL_FORWARDING      (0x01 << VM_CALL_FORWARDING_bit)

struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}

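/*
 * Editor's note (not part of the original header): keywords[] is a flexible
 * array member, so a kwarg block holding n keyword symbols occupies
 *
 *     sizeof(struct rb_callinfo_kwarg) + n * sizeof(VALUE)
 *
 * bytes; rb_callinfo_kwarg_bytes() computes exactly that, with overflow
 * checking via rb_size_mul_add_or_raise().
 */
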
// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits 1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits 32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits 1
#define CI_EMBED_ARGC_bits 3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits 15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)

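/*
 * Editor's note (not part of the original header): with a 64-bit VALUE a
 * packed callinfo therefore lays out, from least to most significant bits,
 *
 *     [ 1-bit tag | 15-bit argc | 16-bit flag | 32-bit method ID ]
 *
 * The tag bit is the same bit as RUBY_FIXNUM_FLAG (see vm_ci_new_id() below),
 * which is how vm_ci_packed_p() distinguishes a packed callinfo from a real
 * imemo pointer.
 */
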
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

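/*
 * Editor's example (not part of the original header): the VM_CALL_* masks are
 * tested against vm_ci_flag().  The helper below is hypothetical and is shown
 * only to illustrate the intended usage.
 */
static inline bool
example_vm_ci_simple_fcall_p(const struct rb_callinfo *ci)
{
    unsigned int flag = vm_ci_flag(ci);
    return (flag & VM_CALL_FCALL) && (flag & VM_CALL_ARGS_SIMPLE);
}
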
static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false : \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
     (kwarg)                        ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))

// vm_method.c
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);

    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

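/*
 * Editor's example (not part of the original header): building the callinfo
 * for a plain positional-argument call such as `recv.foo(1, 2)`.  With no
 * kwargs and small mid/flag/argc values this takes the packed path.  The
 * helper name and its parameter are hypothetical.
 */
static inline const struct rb_callinfo *
example_ci_for_simple_send(ID mid)
{
    return vm_ci_new(mid, VM_CALL_ARGS_SIMPLE, 2, NULL);
}
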
static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // Weak reference. When klass is collected, `cc->klass = Qundef`.

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uint64_t value; // Shape ID in former half, index in latter half
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};

/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR          IMEMO_FL_USER0
#define VM_CALLCACHE_BF            IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER         IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT    IMEMO_FL_USER3
#define VM_CALLCACHE_UNMARKABLE    IMEMO_FL_USER4
#define VM_CALLCACHE_ON_STACK      IMEMO_FL_USER5
#define VM_CALLCACHE_INVALID_SUPER IMEMO_FL_USER6

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}

static inline VALUE
cc_check_class(VALUE klass)
{
    VM_ASSERT(klass == Qundef || RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
    return klass;
}

VALUE rb_vm_cc_table_create(size_t capa);
VALUE rb_vm_cc_table_dup(VALUE old_table);
void rb_vm_cc_table_delete(VALUE table, ID mid);

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    cc_check_class(klass);
    struct rb_callcache *cc = SHAREABLE_IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
    rb_gc_declare_weak_references((VALUE)cc);

    *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
    *((vm_call_handler *)&cc->call_) = call;

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        rb_vm_insert_cc_refinement(cc);
        break;
    }

    if (cme) {
        if (cme->def->type == VM_METHOD_TYPE_ATTRSET || cme->def->type == VM_METHOD_TYPE_IVAR) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
        }
    }
    else {
        *(VALUE *)&cc->flags |= VM_CALLCACHE_INVALID_SUPER;
    }

    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

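/*
 * Editor's note (not part of the original header): the rb_callcache fields are
 * declared const; vm_cc_new() above and the "callcache: mutate" helpers below
 * write to them by explicitly casting the const away.
 */
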
static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = cc_check_class(clazz),       \
        .cme_  = cme,                         \
        .call_ = call,                        \
        .aux_  = aux,                         \
    }

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc_check_class(cc->klass));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline bool
vm_cc_invalid_super(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    // Set when calling super and there is no superclass.
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_INVALID_SUPER);
}

static inline bool
vm_cc_valid(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc_check_class(cc->klass));

    return !UNDEF_P(cc->klass);
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass != Qundef || !vm_cc_markable(cc) || vm_cc_invalid_super(cc));
    VM_ASSERT(cc_check_class(cc->klass));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              vm_cc_invalid_super(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    VM_ASSERT(cc->klass != Qundef || !vm_cc_markable(cc) || vm_cc_invalid_super(cc));
    VM_ASSERT(cc_check_class(cc->klass));
    return cc->call_;
}

static inline void
vm_unpack_shape_and_index(const uint64_t cache_value, shape_id_t *shape_id, attr_index_t *index)
{
    union rb_attr_index_cache cache = {
        .pack = cache_value,
    };
    *shape_id = cache.unpack.shape_id;
    *index = cache.unpack.index - 1;
}

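/*
 * Editor's note (not part of the original header): the stored index is biased
 * by +1 (see vm_pack_shape_and_index() below), so an all-zero cache word
 * decodes to index (attr_index_t)-1, i.e. "no index cached"; the -1 above
 * undoes that bias.
 */
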
static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(cc->aux_.attr.value), shape_id, index);
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(ic->value), shape_id, index);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (vm_cc_valid(cc) && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}

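/*
 * Editor's note (not part of the original header): a call cache may only be
 * reused when its klass is still set (vm_cc_valid()) and the cached method
 * entry has not been invalidated; vm_cc_invalidated_p() bundles both checks.
 */
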
/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}

static inline uint64_t
vm_pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
{
    union rb_attr_index_cache cache = {
        .unpack = {
            .shape_id = shape_id,
            .index = index + 1,
        }
    };
    return cache.pack;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uint64_t *attr_value = (uint64_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET);
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = vm_pack_shape_and_index(dest_shape_id, index);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(dest_shape_id, index));
}

static inline void
vm_ic_attr_index_initialize(struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(shape_id, ATTR_INDEX_NOT_SET));
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    // TODO: rb_multi_ractor_p() is a workaround to stabilize CI
    VM_ASSERT(cc->klass != Qundef || rb_multi_ractor_p()); // should be enabled

    *(VALUE *)&cc->klass = Qundef;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}

/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        unsigned int argc;
        unsigned int flag;
        const struct rb_callcache *cc;
    } entries[FLEX_ARY_LEN];
};

static inline size_t
vm_ccs_alloc_size(size_t capa)
{
    return offsetof(struct rb_class_cc_entries, entries) + (sizeof(struct rb_class_cc_entries_entry) * capa);
}

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    bool valid;
    RB_VM_LOCKING_NO_BARRIER() {
        valid = vm_cc_cme(cc) == cme ||
            (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme));
    }
    if (valid) {
        return true;
    }
#if 1
    // debug print

    fprintf(stderr, "iseq_overload:%d, cme:%p (def:%p), vm_cc_cme(cc):%p (def:%p)\n",
            (int)cme->def->iseq_overload,
            cme, cme->def,
            vm_cc_cme(cc), vm_cc_cme(cc)->def);
    rp(cme);
    rp(vm_cc_cme(cc));
    rp(rb_vm_lookup_overloaded_cme(cme));
#endif
    return false;
}

#endif

#endif /* RUBY_VM_CALLINFO_H */