Ruby 4.0.0dev (2025-12-20 revision 6bf921051ceba1742318e3c92dddd50ba4f05d17)
imemo.c (6bf921051ceba1742318e3c92dddd50ba4f05d17)

#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "internal/object.h"
#include "internal/st.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

const char *
rb_imemo_name(enum imemo_type type)
{
    // no default case, so the compiler warns when an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
        IMEMO_NAME(callcache);
        IMEMO_NAME(callinfo);
        IMEMO_NAME(constcache);
        IMEMO_NAME(cref);
        IMEMO_NAME(env);
        IMEMO_NAME(ifunc);
        IMEMO_NAME(iseq);
        IMEMO_NAME(memo);
        IMEMO_NAME(ment);
        IMEMO_NAME(svar);
        IMEMO_NAME(throw_data);
        IMEMO_NAME(tmpbuf);
        IMEMO_NAME(fields);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}
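
/* Illustrative sketch (not part of imemo.c): each IMEMO_NAME(x) above
 * expands to `case imemo_x: return "x";`, e.g.
 *
 *     IMEMO_NAME(env);   =>   case imemo_env: return "env";
 *
 * and because the switch has no default label, -Wswitch-style warnings
 * flag any enum imemo_type value that lacks an entry. A hypothetical
 * debugging helper built on this function:
 *
 *     static void
 *     debug_print_imemo(FILE *out, VALUE obj)
 *     {
 *         if (RB_TYPE_P(obj, T_IMEMO)) {
 *             fprintf(out, "imemo/%s\n", rb_imemo_name(imemo_type(obj)));
 *         }
 *     }
 */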

/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable)
{
    VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT) | (is_shareable ? FL_SHAREABLE : 0);
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}
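
/* Sketch of the header layout produced above (assuming the definitions in
 * internal/imemo.h, where the type mask lives): the imemo type is packed
 * into the user-flag bits, so it is recovered from the object header rather
 * than from separate storage:
 *
 *     flags: [ ... user bits: type ... | FL_SHAREABLE? | FL_WB_PROTECTED | T_IMEMO ]
 *     imemo_type(obj)  ==  (RBASIC(obj)->flags >> FL_USHIFT) & IMEMO_MASK
 *
 * FL_WB_PROTECTED declares that the object maintains write barriers
 * (unprotected objects must be re-scanned by the GC), and FL_SHAREABLE
 * marks it as shareable across Ractors when is_shareable is true.
 */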

static VALUE
rb_imemo_tmpbuf_new(void)
{
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, rb_imemo_tmpbuf_t, 0, flags, sizeof(rb_imemo_tmpbuf_t), NULL);

    obj->ptr = NULL;
    obj->cnt = 0;

    return (VALUE)obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    /* Keep this order: allocate an empty imemo first, then xmalloc, so a
     * failed allocation cannot leak the buffer. */
    rb_imemo_tmpbuf_t *tmpbuf = (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    void *ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
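
/* Usage sketch for the three tmpbuf functions above (illustrative;
 * `sum_longs` is hypothetical, not part of this file). The volatile VALUE
 * slot on the caller's stack keeps the imemo, and therefore the raw
 * buffer, alive for the GC until rb_free_tmp_buffer() releases both:
 *
 *     static long
 *     sum_longs(const long *src, long n)
 *     {
 *         volatile VALUE store = 0;
 *         long *buf = rb_alloc_tmp_buffer(&store, n * (long)sizeof(long));
 *         long total = 0;
 *         memcpy(buf, src, n * sizeof(long));
 *         for (long i = 0; i < n; i++) total += buf[i];
 *         rb_free_tmp_buffer(&store);
 *         return total;
 *     }
 *
 * The public ALLOCV/ALLOCV_N/ALLOCV_END macros are thin wrappers around
 * this pair of functions.
 */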

static VALUE
imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
        RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
        return fields;
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
        IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
        FL_SET_RAW(fields, OBJ_FIELD_HEAP);
        return fields;
    }
}

VALUE
rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new(owner, capa, shareable);
}
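
/* Worked example for the sizing logic above, assuming a 64-bit build where
 * sizeof(VALUE) == 8: for capa == 4 the embedded form needs
 * offsetof(struct rb_fields, as.embed) + 4 * 8 bytes. When
 * rb_gc_size_allocatable_p() says a GC slot of that size exists, the field
 * values live inline in the imemo's slot; otherwise the slot holds only
 * struct rb_fields, the values go to a separate xmalloc'ed array, and
 * OBJ_FIELD_HEAP is set so rb_imemo_memsize()/imemo_fields_free() know
 * there is out-of-slot memory to account for and release. */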

static VALUE
imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    return fields;
}

VALUE
rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new_complex(owner, capa, shareable);
}

static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    VALUE field_obj = (VALUE)arg;
    RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

VALUE
rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
    return fields;
}
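
/* Why the st_foreach above: the imemo was just created WB-protected, but
 * tbl was populated before the imemo existed, so none of the stored VALUEs
 * went through its write barrier. RB_OBJ_WRITTEN(fields, Qundef, value)
 * replays the barrier for each entry, so a promoted (old) fields object
 * cannot end up holding unrecorded references to young objects. */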

VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);

        st_table *dest_table = xcalloc(1, sizeof(st_table));
        clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table, false /* TODO: check */);

        st_replace(dest_table, src_table);
        RBASIC_SET_SHAPE_ID(clone, shape_id);

        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id), false /* TODO: check */);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
    }

    return clone;
}

void
rb_imemo_fields_clear(VALUE fields_obj)
{
    // When replacing an imemo/fields with another one, we must clear
    // its shape so that gc.c:obj_free_object_id won't be called.
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }
    // Invalidate the ec->gen_fields_cache.
    RBASIC_CLEAR_CLASS(fields_obj);
}

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      case imemo_fields:
        if (FL_TEST_RAW(obj, OBJ_FIELD_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
            }
            else {
                size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
            }
        }
        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}
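
/* Note: only memory owned by the imemo beyond its own GC slot is counted
 * here (an env's VALUE array, a tmpbuf's buffer, heap-allocated fields,
 * ...). These sizes surface through the objspace extension; a hedged
 * Ruby-level probe:
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of_all   # per-object totals include the extra
 *                                  # bytes reported by rb_imemo_memsize()
 */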

/* =========================================================================
 * mark
 * ========================================================================= */

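/* A note on the traversal protocol used below (a conceptual sketch, not
 * the actual gc.c implementation): each rb_gc_mark_and_move(&slot) call
 * does double duty depending on the GC phase,
 *
 *     if (during compaction's reference-updating pass)
 *         slot = rb_gc_location(slot);   // rewrite pointer to moved object
 *     else
 *         rb_gc_mark_movable(slot);      // mark, without pinning
 *
 * which is why a single function per imemo type can serve marking,
 * compaction, and (guarded by rb_gc_checking_shareable()) shareability
 * verification. */
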
static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // this entry can be a key of the "overloaded_cme" table,
                    // so it must be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            if (!rb_gc_checking_shareable()) {
                rb_gc_mark_and_move(&def->body.bmethod.proc);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because if the klass is
         * freed, cc->klass will be cleared by `vm_cc_invalidate()`.
         *
         * For "normal" CCs, cc->cme (cme) should not be marked because the
         * cc is invalidated through the klass when the cme is freed.
         * - klass marks cme if klass uses cme.
         * - the caller class's ccs->cme marks cc->cme.
         * - if cc is invalidated (the klass no longer refers to the cc),
         *   it is invalidated by `vm_cc_invalidate()`, after which cc->cme
         *   must not be accessed.
         * - With multiple Ractors, the cme is collected by global GC, so
         *   this is safe as long as GC does not interleave with accesses
         *   to the cc and cme.
         *
         * However, cc_type_super and cc_type_refinement are not chained
         * from ccs, so cc->cme should be marked as long as the cc is
         * valid; the cme might be reachable only through the cc in these
         * cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (UNDEF_P(cc->klass)) {
            /* The cc was invalidated; we must not mark anything because
             * all of its fields are considered invalid. */
        }
        else if (reference_updating) {
            if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                    (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);

                RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
                RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));
            }
            else {
                vm_cc_invalidate(cc);
            }
        }
        else {
            RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
            RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));

            rb_gc_mark_weak((VALUE *)&cc->klass);
            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) ||
            !rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&ice->value);
        }

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        if (!rb_gc_checking_shareable()) {
            // cref->klass_or_self can be unshareable, but there is no way
            // to access it from other ractors
            rb_gc_mark_and_move(&cref->klass_or_self);
        }

        rb_gc_mark_and_move_ptr(&cref->next);

        // TODO: Ractor and refinements are not resolved yet
        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&cref->refinements);
        }

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can be NULL just after newobj()
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (VM_ENV_LOCAL_P(env->ep) && VM_ENV_BOXED_P(env->ep)) {
                const rb_box_t *box = VM_ENV_BOX(env->ep);
                if (BOX_USER_P(box)) {
                    rb_gc_mark_and_move((VALUE *)&box->box_object);
                }
            }

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
        }

        break;
      }
      case imemo_fields: {
        rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);

        if (!rb_gc_checking_shareable()) {
            // imemo_fields can refer to unshareable objects
            // even if the imemo_fields itself is shareable.

            if (rb_shape_obj_too_complex_p(obj)) {
                st_table *tbl = rb_imemo_fields_complex_tbl(obj);
                if (reference_updating) {
                    rb_gc_ref_update_table_values_only(tbl);
                }
                else {
                    rb_mark_tbl_no_pin(tbl);
                }
            }
            else {
                VALUE *fields = rb_imemo_fields_ptr(obj);
                attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
                for (attr_index_t i = 0; i < len; i++) {
                    rb_gc_mark_and_move(&fields[i]);
                }
            }
        }
        break;
      }
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

static inline void
imemo_fields_free(struct rb_fields *fields)
{
    if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_HEAP)) {
        if (rb_shape_obj_too_complex_p((VALUE)fields)) {
            st_free_table(fields->as.complex.table);
        }
        else {
            xfree(fields->as.external.ptr);
        }
    }
}

void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);

        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

        break;
      case imemo_fields:
        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
        RB_DEBUG_COUNTER_INC(obj_imemo_fields);
        break;
      default:
        rb_bug("unreachable");
    }
}
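
/* How this is reached (conceptual sketch; the exact call site lives in
 * gc.c's sweep path): when a dead object's BUILTIN_TYPE is T_IMEMO, the GC
 * dispatches here so each imemo type can release what it owns:
 *
 *     case T_IMEMO:
 *         rb_imemo_free(obj);
 *         break;
 *
 * The RB_DEBUG_COUNTER_INC() calls compile away unless Ruby is built with
 * USE_DEBUG_COUNTER=1, in which case they tally per-type free counts. */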