Ruby 3.5.0dev (2025-07-15 revision b2a7b7699261d2a4ef8a9d5d38d3fb9dc99c8253)
imemo.c (b2a7b7699261d2a4ef8a9d5d38d3fb9dc99c8253)
#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "internal/st.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

const char *
rb_imemo_name(enum imemo_type type)
{
    // put no default case to get a warning if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
        IMEMO_NAME(ast);
        IMEMO_NAME(callcache);
        IMEMO_NAME(callinfo);
        IMEMO_NAME(constcache);
        IMEMO_NAME(cref);
        IMEMO_NAME(env);
        IMEMO_NAME(ifunc);
        IMEMO_NAME(iseq);
        IMEMO_NAME(memo);
        IMEMO_NAME(ment);
        IMEMO_NAME(parser_strterm);
        IMEMO_NAME(svar);
        IMEMO_NAME(throw_data);
        IMEMO_NAME(tmpbuf);
        IMEMO_NAME(fields);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}

/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size)
{
    VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT);
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}

static rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_new(void)
{
    size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, struct rb_imemo_tmpbuf_struct, 0, flags, size, 0);

    return obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep this order: allocate an empty imemo first, then xmalloc. If the
     * imemo were allocated after xmalloc, an exception raised during its
     * allocation would leak the malloc'ed memory. */
    tmpbuf = rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}
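
/* Illustrative note: roomof() is ceiling division, so `cnt` above is the
 * number of VALUE-sized slots needed to cover `len` bytes. For example, on a
 * build with 8-byte VALUEs:
 *
 *     roomof(20, 8) == (20 + 8 - 1) / 8 == 3
 *
 * i.e. a 20-byte request is tracked as 3 VALUE slots.
 */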

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
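
/* Illustrative usage sketch (not part of this file): the public ALLOCV_N()
 * and ALLOCV_END() macros fall back to rb_alloc_tmp_buffer_with_count() and
 * rb_free_tmp_buffer() when the request is too large for alloca, so a
 * GC-safe temporary buffer in a C extension typically looks like the
 * hypothetical function below.
 */
#if 0 /* example only */
static VALUE
example_copy_to_tmp_buffer(VALUE ary)
{
    long len = RARRAY_LEN(ary);
    VALUE store;
    /* `store` receives the imemo_tmpbuf so the GC keeps the buffer alive */
    VALUE *buf = ALLOCV_N(VALUE, store, len);

    for (long i = 0; i < len; i++) {
        buf[i] = RARRAY_AREF(ary, i);
    }
    /* ... use buf ... */

    ALLOCV_END(store); /* releases the xmalloc'ed memory immediately */
    return Qnil;
}
#endif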

rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
{
    rb_imemo_tmpbuf_t *tmpbuf = rb_imemo_tmpbuf_new();
    tmpbuf->ptr = buf;
    tmpbuf->next = old_heap;
    tmpbuf->cnt = cnt;

    return tmpbuf;
}

static VALUE
imemo_fields_new(VALUE klass, size_t capa)
{
    size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, klass, embedded_size);
        RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
        return fields;
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, klass, sizeof(struct rb_fields));
        FL_SET_RAW(fields, OBJ_FIELD_EXTERNAL);
        IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
        return fields;
    }
}
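
/* Illustrative note (the concrete numbers are assumptions for the example,
 * not taken from this file): imemo_fields_new() keeps the fields inline when
 * the whole imemo still fits an allocatable GC slot, and spills them to a
 * separate xmalloc'ed array otherwise. For instance, on a 64-bit build where
 * offsetof(struct rb_fields, as.embed) were 16:
 *
 *     capa == 4    -> embedded_size == 16 + 4 * 8    == 48 bytes (inline)
 *     capa == 1000 -> embedded_size == 16 + 1000 * 8 == 8016 bytes
 *                     -> OBJ_FIELD_EXTERNAL is set and as.external.ptr
 *                        points at ALLOC_N(VALUE, 1000)
 */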

VALUE
rb_imemo_fields_new(VALUE klass, size_t capa)
{
    return imemo_fields_new(klass, capa);
}

static VALUE
imemo_fields_new_complex(VALUE klass, size_t capa)
{
    VALUE fields = imemo_fields_new(klass, sizeof(struct rb_fields));
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    return fields;
}

VALUE
rb_imemo_fields_new_complex(VALUE klass, size_t capa)
{
    return imemo_fields_new_complex(klass, capa);
}

static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    VALUE field_obj = (VALUE)arg;
    RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

VALUE
rb_imemo_fields_new_complex_tbl(VALUE klass, st_table *tbl)
{
    VALUE fields = imemo_fields_new(klass, sizeof(struct rb_fields));
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
    return fields;
}

VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        clone = rb_imemo_fields_new_complex(CLASS_OF(fields_obj), 0);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);
        st_table *dest_table = rb_imemo_fields_complex_tbl(clone);
        st_replace(dest_table, src_table);
        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id));
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
    }

    return clone;
}

void
rb_imemo_fields_clear(VALUE fields_obj)
{
    // When replacing an imemo/fields with another one, we must clear
    // its shape so that gc.c:obj_free_object_id won't be called.
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }
}

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      case imemo_fields:
        if (rb_shape_obj_too_complex_p(obj)) {
            size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
        }
        else if (FL_TEST_RAW(obj, OBJ_FIELD_EXTERNAL)) {
            size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
        }
        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}

/* =========================================================================
 * mark
 * ========================================================================= */

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return obj && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // it can be a key of the "overloaded_cme" table,
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            rb_gc_mark_and_move(&def->body.bmethod.proc);
            if (!reference_updating) {
                if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because the cc->klass
         * reference is cleared by `vm_cc_invalidate()` when the klass is
         * freed.
         *
         * cc->cme (cme) should not be marked because the cc is invalidated
         * when the cme is freed.
         * - klass marks cme if klass uses cme.
         * - the caller class's ccs->cme marks cc->cme.
         * - if cc is invalidated (klass doesn't refer to the cc),
         *   cc is invalidated by `vm_cc_invalidate()` and cc->cme is
         *   not accessed.
         * - with multiple Ractors, cme is collected by global GC,
         *   so it is safe as long as GC does not interleave with
         *   accesses to cc and cme.
         * - However, cc_type_super and cc_type_refinement are not chained
         *   from ccs, so cc->cme should be marked; the cme might be
         *   reachable only through cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (reference_updating) {
            if (!cc->klass) {
                // already invalidated
            }
            else {
                if (moved_or_living_object_strictly_p(cc->klass) &&
                    moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                    *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                    *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                        (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);
                }
                else {
                    vm_cc_invalidate(cc);
                }
            }
        }
        else {
            if (cc->klass && (vm_cc_super_p(cc) || vm_cc_refinement_p(cc))) {
                rb_gc_mark_movable((VALUE)cc->cme_);
                rb_gc_mark_movable((VALUE)cc->klass);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        rb_gc_mark_and_move(&ice->value);

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        rb_gc_mark_and_move(&cref->klass_or_self);
        rb_gc_mark_and_move_ptr(&cref->next);
        rb_gc_mark_and_move(&cref->refinements);

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can be NULL just after newobj(), hence the check.
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }

        break;
      }
      case imemo_fields: {
        rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);

        if (rb_shape_obj_too_complex_p(obj)) {
            st_table *tbl = rb_imemo_fields_complex_tbl(obj);
            if (reference_updating) {
                rb_gc_ref_update_table_values_only(tbl);
            }
            else {
                rb_mark_tbl_no_pin(tbl);
            }
        }
        else {
            VALUE *fields = rb_imemo_fields_ptr(obj);
            attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
            for (attr_index_t i = 0; i < len; i++) {
                rb_gc_mark_and_move(&fields[i]);
            }
        }
        break;
      }
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

// alive: if false, the objects that ccs points to may already have been freed.
static void
vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass)
{
    if (ccs->entries) {
        for (int i = 0; i < ccs->len; i++) {
            const struct rb_callcache *cc = ccs->entries[i].cc;
            if (!alive) {
                // the cc may already have been freed, so check before touching it.
                if (rb_gc_pointer_to_heap_p((VALUE)cc) &&
                    !rb_objspace_garbage_object_p((VALUE)cc) &&
                    IMEMO_TYPE_P(cc, imemo_callcache) &&
                    cc->klass == klass) {
                    // OK: this is probably a target cc.
                }
                else {
                    continue;
                }
            }

            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
            vm_cc_invalidate(cc);
        }
        ruby_xfree(ccs->entries);
    }
    ruby_xfree(ccs);
}

void
rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
{
    RB_DEBUG_COUNTER_INC(ccs_free);
    vm_ccs_free(ccs, true, Qundef);
}

static enum rb_id_table_iterator_result
cc_tbl_free_i(VALUE ccs_ptr, void *data)
{
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VALUE klass = (VALUE)data;
    VM_ASSERT(vm_ccs_p(ccs));

    vm_ccs_free(ccs, false, klass);

    return ID_TABLE_CONTINUE;
}

void
rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass)
{
    if (!cc_tbl) return;
    rb_id_table_foreach_values(cc_tbl, cc_tbl_free_i, (void *)klass);
    rb_id_table_free(cc_tbl);
}

static inline void
imemo_fields_free(struct rb_fields *fields)
{
    if (rb_shape_obj_too_complex_p((VALUE)fields)) {
        st_free_table(fields->as.complex.table);
    }
    else if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_EXTERNAL)) {
        xfree(fields->as.external.ptr);
    }
}

void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

        break;
      case imemo_parser_strterm:
        RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);

        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);

        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

        break;
      case imemo_fields:
        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
        RB_DEBUG_COUNTER_INC(obj_imemo_fields);
        break;
      default:
        rb_bug("unreachable");
    }
}