Ruby 3.5.0dev (2025-09-19 revision 701e0975abc74defc4efe8363b465c46099079df)
imemo.c (701e0975abc74defc4efe8363b465c46099079df)
#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "internal/object.h"
#include "internal/st.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

const char *
rb_imemo_name(enum imemo_type type)
{
    // no default case, so the compiler warns if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
      IMEMO_NAME(callcache);
      IMEMO_NAME(callinfo);
      IMEMO_NAME(constcache);
      IMEMO_NAME(cref);
      IMEMO_NAME(env);
      IMEMO_NAME(ifunc);
      IMEMO_NAME(iseq);
      IMEMO_NAME(memo);
      IMEMO_NAME(ment);
      IMEMO_NAME(svar);
      IMEMO_NAME(throw_data);
      IMEMO_NAME(tmpbuf);
      IMEMO_NAME(fields);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}
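
/* Illustrative sketch (not part of the original source): how a debugging
 * helper might use rb_imemo_name(). imemo_type() recovers the type from the
 * object's flags, mirroring the `type << FL_USHIFT` encoding used by
 * rb_imemo_new() below. The helper name and the FILE parameter are
 * hypothetical; disabled with #if 0 because it is an example only. */
#if 0
static void
debug_print_imemo(VALUE obj)
{
    RUBY_ASSERT(RB_TYPE_P(obj, T_IMEMO));
    /* assumes <stdio.h> is available */
    fprintf(stderr, "imemo: %s (%p)\n", rb_imemo_name(imemo_type(obj)), (void *)obj);
}
#endif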
37
/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size)
{
    VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT);
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}

static VALUE
rb_imemo_tmpbuf_new(void)
{
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, rb_imemo_tmpbuf_t, 0, flags, sizeof(rb_imemo_tmpbuf_t), NULL);

    obj->ptr = NULL;
    obj->cnt = 0;

    return (VALUE)obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    /* Keep this order: allocate the empty imemo first, then xmalloc, so the
     * malloc'ed memory cannot leak if the imemo allocation raises. */
    rb_imemo_tmpbuf_t *tmpbuf = (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    void *ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
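
/* Illustrative sketch (assumption, not in the original source): the intended
 * lifecycle of the tmp buffer API above. The `store` stack slot keeps the
 * imemo_tmpbuf reachable by the conservative GC, and the imemo in turn owns
 * `ptr`, so the raw buffer survives until rb_free_tmp_buffer(). The function
 * name is hypothetical; disabled with #if 0 because it is an example only. */
#if 0
static void
tmp_buffer_example(long len)
{
    volatile VALUE store = 0;
    char *buf = rb_alloc_tmp_buffer(&store, len);
    /* ... use buf; it is GC-safe while `store` is live on the stack ... */
    rb_free_tmp_buffer(&store);
}
#endif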

static VALUE
imemo_fields_new(VALUE owner, size_t capa)
{
    size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size);
        RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
        return fields;
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields));
        IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
        FL_SET_RAW(fields, OBJ_FIELD_HEAP);
        return fields;
    }
}
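
/* Layout note (explanatory sketch of the logic above, not in the original
 * source): when a GC slot large enough for `capa` embedded VALUEs is
 * allocatable, the fields live inline in the imemo; otherwise they live in a
 * separately xmalloc'ed array and the object is flagged OBJ_FIELD_HEAP so
 * that rb_imemo_memsize() and imemo_fields_free() below account for and
 * release the external allocation. */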

VALUE
rb_imemo_fields_new(VALUE owner, size_t capa)
{
    return imemo_fields_new(owner, capa);
}

static VALUE
imemo_fields_new_complex(VALUE owner, size_t capa)
{
    VALUE fields = imemo_fields_new(owner, 1);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    return fields;
}

VALUE
rb_imemo_fields_new_complex(VALUE owner, size_t capa)
{
    return imemo_fields_new_complex(owner, capa);
}

static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    VALUE field_obj = (VALUE)arg;
    RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

VALUE
rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl)
{
    VALUE fields = imemo_fields_new(owner, sizeof(struct rb_fields));
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
    return fields;
}

VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        clone = rb_imemo_fields_new_complex(rb_imemo_fields_owner(fields_obj), 0);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);
        st_table *dest_table = rb_imemo_fields_complex_tbl(clone);
        st_replace(dest_table, src_table);
        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id));
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
    }

    return clone;
}
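
/* Write-barrier note (explanatory, not in the original source): st_replace()
 * and MEMCPY() copy references without notifying the GC, so each copied
 * VALUE is declared afterwards with RB_OBJ_WRITTEN(clone, Qundef, ...).
 * Without this, an old-generation clone could hold unrecorded pointers to
 * young objects, which a minor GC could then free prematurely. */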

void
rb_imemo_fields_clear(VALUE fields_obj)
{
    // When replacing an imemo/fields with another one, we must clear
    // its shape so that gc.c:obj_free_object_id won't be called.
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }
    // Invalidate the ec->gen_fields_cache.
    RBASIC_CLEAR_CLASS(fields_obj);
}

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);
        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);
        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);
        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);
        break;
      case imemo_fields:
        if (FL_TEST_RAW(obj, OBJ_FIELD_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
            }
            else {
                size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
            }
        }
        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}
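
/* Note (assumption, not stated in the original source): this reports only
 * memory the imemo owns outside of its GC slot; the caller is expected to
 * account for the slot itself. The imemo_ment case adds sizeof(def), a
 * pointer-sized field, rather than the rb_method_definition_t it points to,
 * since method definitions can be shared between method entries. */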

/* =========================================================================
 * mark
 * ========================================================================= */

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // it can be a key of the "overloaded_cme" table,
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            rb_gc_mark_and_move(&def->body.bmethod.proc);
            if (def->body.bmethod.hooks) {
                rb_hook_list_mark_and_move(def->body.bmethod.hooks);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because if the klass is
         * freed, cc->klass will be cleared by `vm_cc_invalidate()`.
         *
         * For "normal" CCs, cc->cme (cme) should not be marked because the cc
         * is invalidated through the klass when the cme is freed.
         * - klass marks cme if klass uses cme.
         * - caller class's ccs->cme marks cc->cme.
         * - if cc is invalidated (klass doesn't refer to the cc), cc is
         *   invalidated by `vm_cc_invalidate()`, after which cc->cme must not
         *   be accessed.
         * - With multiple Ractors, the cme is collected by global GC,
         *   so it is safe as long as GC does not interleave with accesses
         *   to cc and cme.
         *
         * However, cc_type_super and cc_type_refinement are not chained
         * from ccs, so cc->cme should be marked as long as the cc is valid;
         * the cme might be reachable only through the cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (UNDEF_P(cc->klass)) {
            /* The cc has been invalidated, so we must not mark anything;
             * all of its fields are considered invalid. */
        }
        else if (reference_updating) {
            if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                    (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);

                RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
                RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));
            }
            else {
                vm_cc_invalidate(cc);
            }
        }
        else {
            RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
            RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));

            rb_gc_mark_weak((VALUE *)&cc->klass);
            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        rb_gc_mark_and_move(&ice->value);

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        rb_gc_mark_and_move(&cref->klass_or_self);
        rb_gc_mark_and_move_ptr(&cref->next);
        rb_gc_mark_and_move(&cref->refinements);

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can be NULL just after newobj().
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
        }

        break;
      }
      case imemo_fields: {
        rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);

        if (rb_shape_obj_too_complex_p(obj)) {
            st_table *tbl = rb_imemo_fields_complex_tbl(obj);
            if (reference_updating) {
                rb_gc_ref_update_table_values_only(tbl);
            }
            else {
                rb_mark_tbl_no_pin(tbl);
            }
        }
        else {
            VALUE *fields = rb_imemo_fields_ptr(obj);
            attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
            for (attr_index_t i = 0; i < len; i++) {
                rb_gc_mark_and_move(&fields[i]);
            }
        }
        break;
      }
      default:
        rb_bug("unreachable");
    }
}
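
/* Pattern note (explanatory, not in the original source): the single
 * `reference_updating` flag lets one walker serve both GC phases. With the
 * flag false, rb_gc_mark_and_move() marks the referenced child; with the
 * flag true, during compaction, the same call rewrites the slot with
 * rb_gc_location() to point at the object's new address. */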

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

static inline void
imemo_fields_free(struct rb_fields *fields)
{
    if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_HEAP)) {
        if (rb_shape_obj_too_complex_p((VALUE)fields)) {
            st_free_table(fields->as.complex.table);
        }
        else {
            xfree(fields->as.external.ptr);
        }
    }
}

void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);
        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);
        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);
        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);
        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);
        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
        break;
      case imemo_fields:
        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
        RB_DEBUG_COUNTER_INC(obj_imemo_fields);
        break;
      default:
        rb_bug("unreachable");
    }
}