Ruby 4.1.0dev (2026-01-04 revision a0c483fcfb9b8a2009cf21a8bce5fa2ad54d4fda)
imemo.c (a0c483fcfb9b8a2009cf21a8bce5fa2ad54d4fda)

#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "internal/object.h"
#include "internal/st.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

const char *
rb_imemo_name(enum imemo_type type)
{
    // put no default case to get a warning if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
      IMEMO_NAME(callcache);
      IMEMO_NAME(callinfo);
      IMEMO_NAME(constcache);
      IMEMO_NAME(cref);
      IMEMO_NAME(env);
      IMEMO_NAME(ifunc);
      IMEMO_NAME(iseq);
      IMEMO_NAME(memo);
      IMEMO_NAME(ment);
      IMEMO_NAME(svar);
      IMEMO_NAME(throw_data);
      IMEMO_NAME(tmpbuf);
      IMEMO_NAME(fields);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}
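
/* Illustrative sketch (not part of the original imemo.c): rb_imemo_name()
 * simply spells out the enum value, which is handy when printing T_IMEMO
 * objects from debugging code. Wrapped in #if 0 because it is an example,
 * not code that ships in this file; the helper name is an assumption. */
#if 0
static void
imemo_name_example(void)
{
    const char *name = rb_imemo_name(imemo_cref);
    /* name now points to the string literal "cref" */
    (void)name;
}
#endif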

/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable)
{
    VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT) | (is_shareable ? FL_SHAREABLE : 0);
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}

VALUE
rb_imemo_tmpbuf_new(void)
{
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, rb_imemo_tmpbuf_t, 0, flags, sizeof(rb_imemo_tmpbuf_t), NULL);

    rb_gc_register_pinning_obj((VALUE)obj);

    obj->ptr = NULL;
    obj->cnt = 0;

    return (VALUE)obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    /* Keep this order: allocate an empty imemo first, then xmalloc, to
     * avoid a potential memory leak */
    rb_imemo_tmpbuf_t *tmpbuf = (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    void *ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
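
/* Illustrative sketch (not part of the original imemo.c): the usual caller
 * pattern for the tmpbuf API above. The imemo_tmpbuf is rooted through
 * `store`, a VALUE living on the caller's stack, so the GC keeps the
 * xmalloc'ed memory alive until rb_free_tmp_buffer() releases it. The
 * helper name and the Qnil fill are illustrative assumptions. */
#if 0
static void
tmpbuf_usage_example(long n)
{
    volatile VALUE store = 0;
    VALUE *buf = rb_alloc_tmp_buffer_with_count(&store, n * sizeof(VALUE), n);

    for (long i = 0; i < n; i++) {
        buf[i] = Qnil;              /* use the buffer as GC-visible scratch */
    }

    rb_free_tmp_buffer(&store);     /* frees buf and clears the imemo */
}
#endif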

struct MEMO *
rb_imemo_memo_new(VALUE a, VALUE b, VALUE c)
{
    struct MEMO *memo = IMEMO_NEW(struct MEMO, imemo_memo, 0);

    rb_gc_register_pinning_obj((VALUE)memo);

    *((VALUE *)&memo->v1) = a;
    *((VALUE *)&memo->v2) = b;
    *((VALUE *)&memo->u3.value) = c;

    return memo;
}
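
/* Illustrative sketch (not part of the original imemo.c): MEMO objects are
 * small internal (v1, v2, u3) triples used by core iterators. The
 * constructor above writes the const-declared fields through casts; later
 * updates are expected to go through the MEMO_V1_SET/MEMO_V2_SET helpers
 * from internal/imemo.h so the write barrier fires. The function name below
 * is an illustrative assumption. */
#if 0
static void
memo_usage_example(VALUE ary)
{
    struct MEMO *memo = rb_imemo_memo_new(ary, INT2FIX(0), 0);

    /* bump the counter held in v2, keeping the GC informed */
    MEMO_V2_SET(memo, INT2FIX(FIX2INT(memo->v2) + 1));
}
#endif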

static VALUE
imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
        RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
        return fields;
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
        IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
        FL_SET_RAW(fields, OBJ_FIELD_HEAP);
        return fields;
    }
}

VALUE
rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new(owner, capa, shareable);
}

static VALUE
imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    return fields;
}

VALUE
rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new_complex(owner, capa, shareable);
}

static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    VALUE field_obj = (VALUE)arg;
    RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

VALUE
rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
    return fields;
}
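
/* Illustrative sketch (not part of the original imemo.c): how a caller might
 * choose between the two constructors above. Real callers use shape
 * information to decide; the boolean parameter, the shareable=false choice,
 * and the helper name here are illustrative assumptions. */
#if 0
static VALUE
fields_for_example(VALUE owner, size_t nfields, bool too_complex)
{
    if (too_complex) {
        /* fields keyed by ID in an st_table */
        return rb_imemo_fields_new_complex(owner, nfields, false);
    }

    /* fields stored as a flat VALUE array, embedded when the slot is big enough */
    return rb_imemo_fields_new(owner, nfields, false);
}
#endif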

VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);

        st_table *dest_table = xcalloc(1, sizeof(st_table));
        clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table, false /* TODO: check */);

        st_replace(dest_table, src_table);
        RBASIC_SET_SHAPE_ID(clone, shape_id);

        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id), false /* TODO: check */);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
    }

    return clone;
}

void
rb_imemo_fields_clear(VALUE fields_obj)
{
    // When replacing an imemo/fields with another one, we must clear
    // its shape so that gc.c:obj_free_object_id won't be called.
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }
    // Invalidate the ec->gen_fields_cache.
    RBASIC_CLEAR_CLASS(fields_obj);
}

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      case imemo_fields:
        if (FL_TEST_RAW(obj, OBJ_FIELD_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
            }
            else {
                size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
            }
        }
        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}

/* =========================================================================
 * mark
 * ========================================================================= */

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // it can be a key of "overloaded_cme" table
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            if (!rb_gc_checking_shareable()) {
                rb_gc_mark_and_move(&def->body.bmethod.proc);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because if the klass is
         * freed, cc->klass will be cleared by `vm_cc_invalidate()`.
         *
         * For "normal" CCs, cc->cme (cme) should not be marked because the cc
         * is invalidated through the klass when the cme is freed.
         * - klass marks cme if klass uses cme.
         * - caller class's ccs->cme marks cc->cme.
         * - if cc is invalidated (klass doesn't refer to the cc), cc is
         *   invalidated by `vm_cc_invalidate()`, after which cc->cme must not
         *   be accessed.
         * - With multiple Ractors, the cme is only collected by a global GC,
         *   so accessing cc and cme is safe as long as no GC interleaves with
         *   that access.
         *
         * However, cc_type_super and cc_type_refinement are not chained
         * from ccs, so cc->cme should be marked as long as the cc is valid;
         * the cme might be reachable only through the cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (UNDEF_P(cc->klass)) {
            /* If the cc has been invalidated, we must not mark anything;
             * all of its fields are considered invalid. */
        }
        else if (reference_updating) {
            if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                    (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);

                RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
                RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));
            }
            else {
                vm_cc_invalidate(cc);
            }
        }
        else {
            RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
            RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));

            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) ||
            !rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&ice->value);
        }

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        if (!rb_gc_checking_shareable()) {
            // cref->klass_or_self can be unshareable, but there is no way to
            // access it from other ractors
            rb_gc_mark_and_move(&cref->klass_or_self);
        }

        rb_gc_mark_and_move_ptr(&cref->next);

        // TODO: Ractors and refinements are not resolved yet
        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&cref->refinements);
        }

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can still be NULL just after newobj().
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (VM_ENV_LOCAL_P(env->ep) && VM_ENV_BOXED_P(env->ep)) {
                const rb_box_t *box = VM_ENV_BOX(env->ep);
                if (BOX_USER_P(box)) {
                    rb_gc_mark_and_move((VALUE *)&box->box_object);
                }
            }

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
        }

        break;
      }
      case imemo_fields: {
        rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);

        if (!rb_gc_checking_shareable()) {
            // imemo_fields can refer to unshareable objects
            // even if the imemo_fields itself is shareable.

            if (rb_shape_obj_too_complex_p(obj)) {
                st_table *tbl = rb_imemo_fields_complex_tbl(obj);
                if (reference_updating) {
                    rb_gc_ref_update_table_values_only(tbl);
                }
                else {
                    rb_mark_tbl_no_pin(tbl);
                }
            }
            else {
                VALUE *fields = rb_imemo_fields_ptr(obj);
                attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
                for (attr_index_t i = 0; i < len; i++) {
                    rb_gc_mark_and_move(&fields[i]);
                }
            }
        }
        break;
      }
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

static inline void
imemo_fields_free(struct rb_fields *fields)
{
    if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_HEAP)) {
        if (rb_shape_obj_too_complex_p((VALUE)fields)) {
            st_free_table(fields->as.complex.table);
        }
        else {
            xfree(fields->as.external.ptr);
        }
    }
}

void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);

        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

        break;
      case imemo_fields:
        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
        RB_DEBUG_COUNTER_INC(obj_imemo_fields);
        break;
      default:
        rb_bug("unreachable");
    }
}