Ruby 3.5.0dev (2025-11-01 revision 8db30094fce210cd193b571d7c4b7e5702ed1ca9)
imemo.c (8db30094fce210cd193b571d7c4b7e5702ed1ca9)
#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "internal/object.h"
#include "internal/st.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);
const char *
rb_imemo_name(enum imemo_type type)
{
    // No default case, so the compiler warns if an imemo type is missing.
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
      IMEMO_NAME(callcache);
      IMEMO_NAME(callinfo);
      IMEMO_NAME(constcache);
      IMEMO_NAME(cref);
      IMEMO_NAME(env);
      IMEMO_NAME(ifunc);
      IMEMO_NAME(iseq);
      IMEMO_NAME(memo);
      IMEMO_NAME(ment);
      IMEMO_NAME(svar);
      IMEMO_NAME(throw_data);
      IMEMO_NAME(tmpbuf);
      IMEMO_NAME(fields);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}

/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable)
{
    VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT) | (is_shareable ? FL_SHAREABLE : 0);
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}
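
/* Illustrative note: the imemo type is packed into the flags word built
 * above. internal/imemo.h recovers it with imemo_type(), essentially
 * (RBASIC(obj)->flags >> FL_USHIFT) & IMEMO_MASK, and IMEMO_TYPE_P(v, t)
 * combines that with a T_IMEMO type check.
 */
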
static VALUE
rb_imemo_tmpbuf_new(void)
{
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, rb_imemo_tmpbuf_t, 0, flags, sizeof(rb_imemo_tmpbuf_t), NULL);

    obj->ptr = NULL;
    obj->cnt = 0;

    return (VALUE)obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    /* Keep this order: allocate an empty imemo first, then xmalloc, to
     * avoid a potential memory leak. */
    rb_imemo_tmpbuf_t *tmpbuf = (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    void *ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
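
/* Illustrative usage sketch (hypothetical caller, not from this file):
 * this API backs macros such as ALLOCV_N / ALLOCV_END in ruby/memory.h.
 * A caller keeps a VALUE slot alive on the stack so the imemo_tmpbuf,
 * and therefore the xmalloc'ed region, stays reachable by the GC:
 *
 *     VALUE store = 0;
 *     long *buf = rb_alloc_tmp_buffer(&store, 100 * sizeof(long));
 *     // ... use buf ...
 *     rb_free_tmp_buffer(&store);
 */
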
static VALUE
imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
        RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
        return fields;
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
        IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
        FL_SET_RAW(fields, OBJ_FIELD_HEAP);
        return fields;
    }
}

VALUE
rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new(owner, capa, shareable);
}

static VALUE
imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    return fields;
}

VALUE
rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new_complex(owner, capa, shareable);
}
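
/* Illustrative note: the two st_foreach callbacks below fire a write
 * barrier (RB_OBJ_WRITTEN) for every value already stored in an st_table
 * that an imemo_fields object adopts or copies, since those values were
 * inserted without going through RB_OBJ_WRITE on the imemo itself.
 */
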
static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    VALUE field_obj = (VALUE)arg;
    RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
{
    RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
    return ST_CONTINUE;
}

VALUE
rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
    FL_SET_RAW(fields, OBJ_FIELD_HEAP);
    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
    return fields;
}

VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);

        st_table *dest_table = xcalloc(1, sizeof(st_table));
        clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table, false /* TODO: check */);

        st_replace(dest_table, src_table);
        RBASIC_SET_SHAPE_ID(clone, shape_id);

        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id), false /* TODO: check */);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
    }

    return clone;
}
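
/* Illustrative note: MEMCPY above copies the field VALUEs without write
 * barriers, so the loop immediately afterwards declares each copied
 * reference to the GC with RB_OBJ_WRITTEN; otherwise the WB-protected
 * clone could hold unrecorded old-to-young references.
 */
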
void
rb_imemo_fields_clear(VALUE fields_obj)
{
    // When replacing an imemo/fields with another one, we must clear
    // its shape so that gc.c:obj_free_object_id won't be called.
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }
    // Invalidate the ec->gen_fields_cache.
    RBASIC_CLEAR_CLASS(fields_obj);
}

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      case imemo_fields:
        if (FL_TEST_RAW(obj, OBJ_FIELD_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
            }
            else {
                size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
            }
        }
        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}
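
/* Illustrative note: these sizes cover only memory owned outside the GC
 * slot (env frames, iseq bodies, tmpbuf backing stores, field tables, and
 * so on); the slot occupied by the imemo itself is accounted for
 * separately by the caller, e.g. rb_obj_memsize_of() in gc.c.
 */
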
/* =========================================================================
 * mark
 * ========================================================================= */

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // It can be a key of the "overloaded_cme" table,
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            if (!rb_gc_checking_shareable()) {
                rb_gc_mark_and_move(&def->body.bmethod.proc);
            }
            if (def->body.bmethod.hooks) {
                rb_hook_list_mark_and_move(def->body.bmethod.hooks);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}
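
/* Illustrative note: rb_imemo_mark_and_move() below serves both the
 * marking phase and the compaction (reference-updating) phase, selected
 * by the reference_updating flag. rb_gc_mark_and_move() and its _ptr
 * variant correspondingly either mark the referenced object or rewrite
 * the slot with its post-compaction address (rb_gc_location()).
 */
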
void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because if the klass is
         * freed, cc->klass will be cleared by `vm_cc_invalidate()`.
         *
         * For "normal" CCs, cc->cme (cme) should not be marked because the
         * cc is invalidated through the klass when the cme is freed.
         * - klass marks cme if klass uses cme.
         * - the caller class's ccs->cme marks cc->cme.
         * - if the klass no longer refers to the cc, the cc is invalidated
         *   by `vm_cc_invalidate()`, after which cc->cme must not be
         *   accessed.
         * - With multiple Ractors, the cme is only collected by global GC,
         *   so this is safe as long as GC does not interleave with accesses
         *   to cc and cme.
         *
         * However, cc_type_super and cc_type_refinement are not chained
         * from ccs, so cc->cme should be marked as long as the cc is valid;
         * the cme might be reachable only through the cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (UNDEF_P(cc->klass)) {
            /* If the cc has been invalidated, we must not mark anything;
             * all of its fields are considered invalid. */
        }
        else if (reference_updating) {
            if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                    (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);

                RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
                RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));
            }
            else {
                vm_cc_invalidate(cc);
            }
        }
        else {
            RUBY_ASSERT(RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
            RUBY_ASSERT(IMEMO_TYPE_P((VALUE)cc->cme_, imemo_ment));

            rb_gc_mark_weak((VALUE *)&cc->klass);
            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) ||
            !rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&ice->value);
        }

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        if (!rb_gc_checking_shareable()) {
            // cref->klass_or_self can be unshareable, but there is no way
            // to access it from other Ractors.
            rb_gc_mark_and_move(&cref->klass_or_self);
        }

        rb_gc_mark_and_move_ptr(&cref->next);

        // TODO: Ractor and refinements are not resolved yet
        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&cref->refinements);
        }

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can be NULL just after newobj().
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
        }

        break;
      }
      case imemo_fields: {
        rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);

        if (!rb_gc_checking_shareable()) {
            // imemo_fields can refer to unshareable objects
            // even if the imemo_fields itself is shareable.

            if (rb_shape_obj_too_complex_p(obj)) {
                st_table *tbl = rb_imemo_fields_complex_tbl(obj);
                if (reference_updating) {
                    rb_gc_ref_update_table_values_only(tbl);
                }
                else {
                    rb_mark_tbl_no_pin(tbl);
                }
            }
            else {
                VALUE *fields = rb_imemo_fields_ptr(obj);
                attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
                for (attr_index_t i = 0; i < len; i++) {
                    rb_gc_mark_and_move(&fields[i]);
                }
            }
        }
        break;
      }
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

static inline void
imemo_fields_free(struct rb_fields *fields)
{
    if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_HEAP)) {
        if (rb_shape_obj_too_complex_p((VALUE)fields)) {
            st_free_table(fields->as.complex.table);
        }
        else {
            xfree(fields->as.external.ptr);
        }
    }
}
556
557void
558rb_imemo_free(VALUE obj)
559{
560 switch (imemo_type(obj)) {
561 case imemo_callcache:
562 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
563
564 break;
565 case imemo_callinfo:{
566 const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);
567
568 if (ci->kwarg) {
569 ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
570 if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
571 }
572 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
573
574 break;
575 }
576 case imemo_constcache:
577 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
578
579 break;
580 case imemo_cref:
581 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
582
583 break;
584 case imemo_env: {
585 rb_env_t *env = (rb_env_t *)obj;
586
587 RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
588 xfree((VALUE *)env->env);
589 RB_DEBUG_COUNTER_INC(obj_imemo_env);
590
591 break;
592 }
593 case imemo_ifunc:
594 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
595 break;
596 case imemo_iseq:
597 rb_iseq_free((rb_iseq_t *)obj);
598 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
599
600 break;
601 case imemo_memo:
602 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
603
604 break;
605 case imemo_ment:
606 rb_free_method_entry((rb_method_entry_t *)obj);
607 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
608
609 break;
610 case imemo_svar:
611 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
612
613 break;
614 case imemo_throw_data:
615 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
616
617 break;
618 case imemo_tmpbuf:
619 xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
620 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
621
622 break;
623 case imemo_fields:
624 imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
625 RB_DEBUG_COUNTER_INC(obj_imemo_fields);
626 break;
627 default:
628 rb_bug("unreachable");
629 }
630}