/* Ruby 3.5.0dev (2025-04-26 revision b1283b45e6246a85ce34be54e5806fb0ca2e3d2d)
 * gc/mmtk/mmtk.c — MMTk GC backend implementation. */
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
struct objspace {
    bool measure_gc_time;  // when true, GC pauses are accumulated into total_gc_time
    bool gc_stress;        // when true, every allocation requests a collection

    size_t gc_count;                 // number of collections completed so far
    size_t total_gc_time;            // accumulated stop-the-world time, in nanoseconds
    size_t total_allocated_objects;  // running count of objects handed out by new_obj

    st_table *id_to_obj_tbl;            // lazily built reverse map (object_id -> obj)
    st_table *obj_to_id_tbl;            // obj -> object_id, for objects with FL_SEEN_OBJ_ID
    unsigned long long next_object_id;  // next id to hand out; advances by OBJ_ID_INCREMENT

    st_table *finalizer_table;                          // obj -> hidden array of finalizer procs
    struct MMTk_final_job *finalizer_jobs;              // stack of pending dfree/finalize jobs
    rb_postponed_job_handle_t finalizer_postponed_job;  // runs gc_run_finalizers on a mutator

    struct ccan_list_head ractor_caches;    // list of per-ractor MMTk_ractor_cache nodes
    unsigned long live_ractor_cache_count;  // number of entries in ractor_caches

    // Handshake state between GC worker threads and mutators for stop-the-world.
    pthread_mutex_t mutex;
    bool world_stopped;
    pthread_cond_t cond_world_stopped;  // broadcast by the GC-driving mutator once stopped
    pthread_cond_t cond_world_started;  // broadcast by resume_mutators when GC is done
    size_t start_the_world_count;       // NOTE(review): not updated anywhere in this file — confirm usage

    struct rb_gc_vm_context vm_context;  // VM context GC workers borrow for root scanning/hooks
};
46
48 struct ccan_list_node list_node;
49
50 MMTk_Mutator *mutator;
51 bool gc_mutator_p;
52};
53
55 struct MMTk_final_job *next;
56 enum {
57 MMTK_FINAL_JOB_DFREE,
58 MMTK_FINAL_JOB_FINALIZE,
59 } kind;
60 union {
61 struct {
62 void (*func)(void *);
63 void *data;
64 } dfree;
65 struct {
66 VALUE object_id;
67 VALUE finalizer_array;
68 } finalize;
69 } as;
70};
71
// Each GC worker thread stashes its MMTk TLS pointer here (see
// rb_mmtk_init_gc_worker_thread); rb_gc_impl_mark reads it to reach the
// active tracing closure. Compiler-supported TLS is required.
#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
#else
# error We currently need language-supported TLS
#endif
77
78#include <pthread.h>
79
80static void
81rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
82{
83 rb_mmtk_gc_thread_tls = gc_thread_tls;
84}
85
86static bool
87rb_mmtk_is_mutator(void)
88{
89 return ruby_native_thread_p();
90}
91
92static void
93rb_mmtk_stop_the_world(void)
94{
95 struct objspace *objspace = rb_gc_get_objspace();
96
97 int err;
98 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
99 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
100 }
101
102 while (!objspace->world_stopped) {
103 pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
104 }
105
106 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
107 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
108 }
109}
110
111static void
112rb_mmtk_resume_mutators(void)
113{
114 struct objspace *objspace = rb_gc_get_objspace();
115
116 int err;
117 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
118 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
119 }
120
121 objspace->world_stopped = false;
122 objspace->gc_count++;
123 pthread_cond_broadcast(&objspace->cond_world_started);
124
125 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
126 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
127 }
128}
129
// Upcall (mutator): park this thread while a collection runs. The first
// mutator to get here for a given gc_count drives the stop-the-world
// handshake; later arrivals find gc_count already advanced and return.
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    // Lock order: VM lock first, then the objspace mutex.
    int lock_lev = rb_gc_vm_lock();
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // If gc_count moved while we acquired the locks, another mutator already
    // completed this cycle on our behalf.
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        // Capture a VM context GC workers can borrow for root scanning.
        rb_gc_initialize_vm_context(&objspace->vm_context);

        // Mark this thread as the one coordinating the collection.
        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        objspace->world_stopped = true;

        // Wake the GC worker blocked in rb_mmtk_stop_the_world.
        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            // Accumulate the pause in nanoseconds.
            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    rb_gc_vm_unlock(lock_lev);
}
182
183static size_t
184rb_mmtk_number_of_mutators(void)
185{
186 struct objspace *objspace = rb_gc_get_objspace();
187 return objspace->live_ractor_cache_count;
188}
189
// Upcall: invoke `visit_mutator` on every registered mutator handle.
static void
rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
{
    struct objspace *objspace = rb_gc_get_objspace();
    struct MMTk_ractor_cache *ractor_cache;

    // Walk the intrusive list of per-ractor caches.
    ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
        visit_mutator(ractor_cache->mutator, data);
    }
}
200
// Upcall: mark the VM's root set. Runs on a GC worker thread, which borrows
// the VM context captured during rb_mmtk_block_for_gc.
static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}
212
213static int
214pin_value(st_data_t key, st_data_t value, st_data_t data)
215{
216 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
217
218 return ST_CONTINUE;
219}
220
// Upcall: mark GC-internal structures — the finalizer table, the object-id
// table, and any queued finalization jobs — so their referents survive.
static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        // Finalizer proc arrays (table values) are pinned.
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);

    // Keep the object_id and finalizer array of every queued job alive.
    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            // dfree jobs hold only raw C pointers; nothing to mark.
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.object_id);
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}
248
249static void
250rb_mmtk_scan_object_ruby_style(MMTk_ObjectReference object)
251{
252 rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
253}
254
255static void
256rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
257{
258 rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
259}
260
// Upcall: free a dead object, firing the FREEOBJ event hook first if anyone
// is listening. Runs on a GC worker, hence the vm_context borrow around the
// hook.
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);
}
275
// Upcall: off-heap bytes the VM considers live. Not tracked yet, so report
// zero.
static size_t
rb_mmtk_vm_live_bytes(void)
{
    size_t live_bytes = 0;
    return live_bytes;
}
281
// Queue a FINALIZE job for `obj`, whose finalizer procs live in `table`.
// NOTE(review): this pushes onto finalizer_jobs without the CAS loop used by
// rb_gc_impl_make_zombie — presumably only called when racing pushes cannot
// happen (during GC or shutdown); confirm.
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)table));

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    // Resolve the id now: the object itself is about to die.
    job->as.finalize.object_id = rb_obj_id((VALUE)obj);
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}
299
300static int
301rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data)
302{
304 RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
306
307 struct objspace *objspace = (struct objspace *)data;
308
309 if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
310 make_final_job(objspace, (VALUE)key, (VALUE)value);
311
312 rb_postponed_job_trigger(objspace->finalizer_postponed_job);
313
314 return ST_DELETE;
315 }
316
317 return ST_CONTINUE;
318}
319
320static void
321rb_mmtk_update_finalizer_table(void)
322{
323 struct objspace *objspace = rb_gc_get_objspace();
324
325 // TODO: replace with st_foreach_with_replace when GC is moving
326 st_foreach(objspace->finalizer_table, rb_mmtk_update_finalizer_table_i, (st_data_t)objspace);
327}
328
329static int
330rb_mmtk_update_table_i(VALUE val, void *data)
331{
332 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
333 return ST_DELETE;
334 }
335
336 return ST_CONTINUE;
337}
338
339static int
340rb_mmtk_update_obj_id_tables_obj_to_id_i(st_data_t key, st_data_t val, st_data_t data)
341{
343
344 if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
345 return ST_DELETE;
346 }
347
348 return ST_CONTINUE;
349}
350
351static int
352rb_mmtk_update_obj_id_tables_id_to_obj_i(st_data_t key, st_data_t val, st_data_t data)
353{
355
356 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
357 return ST_DELETE;
358 }
359
360 return ST_CONTINUE;
361}
362
363static void
364rb_mmtk_update_obj_id_tables(void)
365{
366 struct objspace *objspace = rb_gc_get_objspace();
367
368 st_foreach(objspace->obj_to_id_tbl, rb_mmtk_update_obj_id_tables_obj_to_id_i, 0);
369 if (objspace->id_to_obj_tbl) {
370 st_foreach(objspace->id_to_obj_tbl, rb_mmtk_update_obj_id_tables_id_to_obj_i, 0);
371 }
372}
373
374static int
375rb_mmtk_global_tables_count(void)
376{
377 return RB_GC_VM_WEAK_TABLE_COUNT;
378}
379
380static void
381rb_mmtk_update_global_tables(int table)
382{
383 RUBY_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);
384
385 rb_gc_vm_weak_table_foreach(rb_mmtk_update_table_i, NULL, NULL, true, (enum rb_gc_vm_weak_tables)table);
386}
387
// Bootup
// Table of upcalls the MMTk binding invokes back into the VM. Entry order is
// ABI: it must match the MMTk_RubyUpcalls declaration in mmtk.h.
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_scan_object_ruby_style,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_update_obj_id_tables,
};
408
// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

// Heap sizing policy: grow/shrink dynamically, or use a fixed size.
enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};
418
419MMTk_Builder *
420rb_mmtk_builder_init(void)
421{
422 MMTk_Builder *builder = mmtk_builder_default();
423 return builder;
424}
425
426void *
427rb_gc_impl_objspace_alloc(void)
428{
429 MMTk_Builder *builder = rb_mmtk_builder_init();
430 mmtk_init_binding(builder, NULL, &ruby_upcalls, (MMTk_ObjectReference)Qundef);
431
432 return calloc(1, sizeof(struct objspace));
433}
434
// Forward declarations; definitions appear later in this file.
static void objspace_obj_id_init(struct objspace *objspace);
static void gc_run_finalizers(void *data);
437
438void
439rb_gc_impl_objspace_init(void *objspace_ptr)
440{
441 struct objspace *objspace = objspace_ptr;
442
443 objspace->measure_gc_time = true;
444
445 objspace_obj_id_init(objspace);
446
447 objspace->finalizer_table = st_init_numtable();
448 objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);
449
450 ccan_list_head_init(&objspace->ractor_caches);
451
452 objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
453 objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
454 objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
455}
456
// Release the objspace allocated by rb_gc_impl_objspace_alloc.
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}
462
// Create and register a per-ractor cache, binding an MMTk mutator to it.
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    // The first ractor to register brings up MMTk's collection machinery.
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    // NOTE(review): malloc result is used unchecked; OOM here would crash on
    // the list insertion below. Consider xmalloc for consistency.
    struct MMTk_ractor_cache *cache = malloc(sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);

    return cache;
}
479
480void
481rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
482{
483 struct objspace *objspace = objspace_ptr;
484 struct MMTk_ractor_cache *cache = cache_ptr;
485
486 ccan_list_del(&cache->list_node);
487
488 RUBY_ASSERT(objspace->live_ractor_cache_count > 1);
489 objspace->live_ractor_cache_count--;
490
491 mmtk_destroy_mutator(cache->mutator);
492}
493
// No tunable parameters are implemented for the MMTk backend.
void
rb_gc_impl_set_params(void *objspace_ptr)
{
}
495
496static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
497
// Define Ruby-visible GC constants and compatibility methods at boot.
void
rb_gc_impl_init(void)
{
    // Mirror the constants the default GC exposes so introspection code works.
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(640));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    // Compaction is not supported by this backend.
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
519
// Slot sizes in bytes, ascending, zero-terminated (rb_gc_impl_heap_sizes
// hands the raw array to the VM). 640 matches RVARGC_MAX_ALLOCATE_SIZE.
static size_t heap_sizes[6] = {
    40, 80, 160, 320, 640, 0
};
523
524size_t *
525rb_gc_impl_heap_sizes(void *objspace_ptr)
526{
527 return heap_sizes;
528}
529
530int
531rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
532{
533 struct objspace *objspace = data;
534
535 if (!RB_TYPE_P(obj, T_NONE)) {
536 rb_gc_obj_free_vm_weak_references(obj);
537 rb_gc_obj_free(objspace, obj);
538 }
539
540 return 0;
541}
542
// Shutdown
// Generic heap walker, defined below in the "Heap walking" section.
static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
545
546void
547rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
548{
549 mmtk_set_gc_enabled(false);
550 each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
551 mmtk_set_gc_enabled(true);
552}
553
554// GC
555void
556rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
557{
558 mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
559}
560
// TODO: report the real collection phase once it is tracked; for now the VM
// is never told a GC is in progress.
bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    return false;
}
567
568static void
569rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
570{
571 rb_gc_prepare_heap_process_object((VALUE)obj);
572}
573
574void
575rb_gc_impl_prepare_heap(void *objspace_ptr)
576{
577 mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
578}
579
580void
581rb_gc_impl_gc_enable(void *objspace_ptr)
582{
583 mmtk_set_gc_enabled(true);
584}
585
586void
587rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
588{
589 mmtk_set_gc_enabled(false);
590}
591
592bool
593rb_gc_impl_gc_enabled_p(void *objspace_ptr)
594{
595 return mmtk_gc_enabled_p();
596}
597
598void
599rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
600{
601 struct objspace *objspace = objspace_ptr;
602
603 objspace->gc_stress = RTEST(flag);
604}
605
606VALUE
607rb_gc_impl_stress_get(void *objspace_ptr)
608{
609 struct objspace *objspace = objspace_ptr;
610
611 return objspace->gc_stress ? Qtrue : Qfalse;
612}
613
614VALUE
615rb_gc_impl_config_get(void *objspace_ptr)
616{
617 VALUE hash = rb_hash_new();
618
619 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
620 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
621 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
622 size_t heap_min = mmtk_heap_min();
623 if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
624 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));
625
626 return hash;
627}
628
629void
630rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
631{
632 // TODO
633}
634
635// Object allocation
636
// Allocate a new object of `alloc_size` bytes (rounded up to a slot size)
// and populate its header fields. The returned VALUE points 8 bytes past the
// MMTk allocation; the slot size is stored in those leading 8 bytes and read
// back by rb_gc_impl_obj_slot_size via index -1.
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    // Round the request up to the next slot size (40/80/160/320/640).
    if (alloc_size > 640) rb_bug("too big");
    for (int i = 0; i < 5; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    // Stress mode: request a (non-forced, minor) collection on every allocation.
    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    VALUE *alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    alloc_obj++;                   // object body starts after the size header
    alloc_obj[-1] = alloc_size;    // record slot size for obj_slot_size
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;
    // Only slots wide enough carry the optional value fields.
    if (alloc_size > 16) alloc_obj[2] = v1;
    if (alloc_size > 24) alloc_obj[3] = v2;
    if (alloc_size > 32) alloc_obj[4] = v3;

    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size + 8, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_add_obj_free_candidate(alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
675
676size_t
677rb_gc_impl_obj_slot_size(VALUE obj)
678{
679 return ((VALUE *)obj)[-1];
680}
681
682size_t
683rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
684{
685 for (int i = 0; i < 5; i++) {
686 if (size == heap_sizes[i]) return i;
687 if (size < heap_sizes[i]) return i;
688 }
689
690 rb_bug("size too big");
691}
692
// An object is allocatable iff it fits in the largest slot (640 bytes).
bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    enum { RB_MMTK_MAX_SLOT_SIZE = 640 };
    return size <= RB_MMTK_MAX_SLOT_SIZE;
}
698
// Malloc
// Heap-memory hook for the VM's xmalloc.
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system malloc
    void *mem = malloc(size);
    return mem;
}
706
// Zero-initialized allocation hook for the VM's xcalloc.
void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system calloc
    void *mem = calloc(1, size);
    return mem;
}
713
// Resizing hook for the VM's xrealloc; old_size is unused by the system
// allocator.
void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
{
    // TODO: don't use system realloc
    void *mem = realloc(ptr, new_size);
    return mem;
}
720
// Deallocation hook for the VM's xfree; old_size is unused by the system
// allocator.
void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}
727
728void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }
729
// Marking
// Trace `obj` through this worker's active tracing closure. Must run on a GC
// worker thread — rb_mmtk_gc_thread_tls is only set there.
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    // Immediates and special constants are not heap objects.
    if (RB_SPECIAL_CONST_P(obj)) return;

    // NOTE(review): the trailing `false` flag presumably controls pinning —
    // confirm against the closure signature in mmtk.h.
    rb_mmtk_gc_thread_tls->object_closure.c_function(rb_mmtk_gc_thread_tls->object_closure.rust_closure,
                                                     rb_mmtk_gc_thread_tls->gc_context,
                                                     (MMTk_ObjectReference)obj,
                                                     false);
}
741
742void
743rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
744{
745 if (RB_SPECIAL_CONST_P(*ptr)) return;
746
747 // TODO: make it movable
748 rb_gc_impl_mark(objspace_ptr, *ptr);
749}
750
751void
752rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
753{
754 if (RB_SPECIAL_CONST_P(obj)) return;
755
756 // TODO: also pin
757 rb_gc_impl_mark(objspace_ptr, obj);
758}
759
760void
761rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
762{
763 if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
764 rb_gc_impl_mark_and_pin(objspace_ptr, obj);
765 }
766}
767
768void
769rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
770{
771 mmtk_mark_weak((MMTk_ObjectReference *)ptr);
772}
773
774void
775rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
776{
777 mmtk_remove_weak((MMTk_ObjectReference *)ptr);
778}
779
780// Compaction
781bool
782rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
783{
784 rb_bug("unimplemented");
785}
786
787VALUE
788rb_gc_impl_location(void *objspace_ptr, VALUE value)
789{
790 rb_bug("unimplemented");
791}
792
793// Write barriers
794void
795rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
796{
797 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
798
799 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
800}
801
802void
803rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
804{
805 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
806}
807
808void
809rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
810{
811 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
812
813 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
814}
815
816// Heap walking
817static void
818each_objects_i(MMTk_ObjectReference obj, void *d)
819{
820 rb_darray(VALUE) *objs = d;
821
822 rb_darray_append(objs, (VALUE)obj);
823}
824
// Walk every MMTk object, invoking `func(obj, data)`; a non-zero return
// stops the walk. Objects are first snapshotted into a darray — presumably
// so `func` may free objects without upsetting the enumeration (confirm).
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        // An earlier callback may have freed this object; skip stale entries.
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
844
// Adapter state for rb_gc_impl_each_objects. (The struct's declaration line
// was missing from the source; restored.)
struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);  // user callback: (start, end, slot_size, data)
    void *data;                                   // opaque argument forwarded to func
};
849
850static int
851rb_gc_impl_each_objects_i(VALUE obj, void *d)
852{
853 struct rb_gc_impl_each_objects_data *data = d;
854
855 size_t slot_size = rb_gc_impl_obj_slot_size(obj);
856
857 return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
858}
859
860void
861rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
862{
863 struct rb_gc_impl_each_objects_data each_objects_data = {
864 .func = func,
865 .data = data
866 };
867
868 each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
869}
870
872 void (*func)(VALUE, void *);
873 void *data;
874};
875
876static int
877rb_gc_impl_each_object_i(VALUE obj, void *d)
878{
879 struct rb_gc_impl_each_object_data *data = d;
880
881 data->func(obj, data->data);
882
883 return 0;
884}
885
886void
887rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
888{
889 struct rb_gc_impl_each_object_data each_object_data = {
890 .func = func,
891 .data = data
892 };
893
894 each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
895}
896
897// Finalizers
898static VALUE
899gc_run_finalizers_get_final(long i, void *data)
900{
901 VALUE table = (VALUE)data;
902
903 return RARRAY_AREF(table, i);
904}
905
// Postponed-job callback: drain the finalizer_jobs stack, running C dfree
// functions and Ruby-level finalizer procs on a mutator thread.
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    // Defer interrupts so finalizers cannot be interrupted mid-run.
    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        // Pop one job. NOTE(review): this pop is plain while make_zombie
        // pushes with a CAS — presumably safe because pops happen only on
        // mutator threads; confirm.
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE object_id = job->as.finalize.object_id;
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                job->as.finalize.object_id,
                RARRAY_LEN(finalizer_array),
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            // Keep both alive across the finalizer calls above.
            RB_GC_GUARD(object_id);
            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
943
// Register a deferred dfree job for `obj`'s native data. Callable from GC
// worker threads, so the push onto finalizer_jobs is a lock-free CAS loop.
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    // Nothing to do if the type has no dfree function.
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    // Push onto the stack; retry while another thread races the list head.
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    // NOTE(review): skipped when freeing at exit — presumably shutdown
    // drains the job list itself; confirm.
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
966
// ObjectSpace.define_finalizer: register `block` as a finalizer for `obj`.
// Returns the existing equal proc when one is already registered, otherwise
// `block` itself.
VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    // finalizer_table is shared; guard it with the VM lock.
    int lev = rb_gc_vm_lock();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    // Already registered: return the existing proc.
                    rb_gc_vm_unlock(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        // First finalizer for obj: a hidden array invisible to Ruby code.
        table = rb_ary_new3(1, block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    rb_gc_vm_unlock(lev);

    return block;
}
1007
1008void
1009rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1010{
1011 struct objspace *objspace = objspace_ptr;
1012
1013 st_data_t data = obj;
1014 st_delete(objspace->finalizer_table, &data, 0);
1015 FL_UNSET(obj, FL_FINALIZE);
1016}
1017
1018void
1019rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
1020{
1021 struct objspace *objspace = objspace_ptr;
1022 VALUE table;
1023 st_data_t data;
1024
1025 if (!FL_TEST(obj, FL_FINALIZE)) return;
1026
1027 if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
1028 table = (VALUE)data;
1029 st_insert(objspace->finalizer_table, dest, table);
1030 FL_SET(dest, FL_FINALIZE);
1031 }
1032 else {
1033 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
1034 }
1035}
1036
1037static int
1038move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1039{
1040 struct objspace *objspace = (struct objspace *)arg;
1041
1042 make_final_job(objspace, (VALUE)key, (VALUE)val);
1043
1044 return ST_DELETE;
1045}
1046
// At VM shutdown: run every outstanding finalizer, then obj_free remaining
// candidates that require it.
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    // Finalizers may register new finalizers, so loop until the table drains.
    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
    for (size_t i = 0; i < registered_candidates.len; i++) {
        VALUE obj = (VALUE)registered_candidates.ptr[i];

        if (rb_gc_shutdown_call_finalizer_p(obj)) {
            rb_gc_obj_free(objspace_ptr, obj);
            // Zeroed flags mark the slot as T_NONE (already freed).
            RBASIC(obj)->flags = 0;
        }
    }
    mmtk_free_raw_vec_of_obj_ref(registered_candidates);

    // obj_free above may have queued dfree jobs; drain them too.
    gc_run_finalizers(objspace);
}
1071
1072// Object ID
1073static int
1074object_id_cmp(st_data_t x, st_data_t y)
1075{
1076 if (RB_TYPE_P(x, T_BIGNUM)) {
1077 return !rb_big_eql(x, y);
1078 }
1079 else {
1080 return x != y;
1081 }
1082}
1083
1084static st_index_t
1085object_id_hash(st_data_t n)
1086{
1087 return FIX2LONG(rb_hash((VALUE)n));
1088}
1089
// Object ids advance in strides of (RUBY_IMMEDIATE_MASK + 1), starting at
// one full stride.
#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)

// Value-based hash/compare for id keys (handles Bignum ids); used by
// id_to_obj_tbl.
static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};
1097
1098static void
1099objspace_obj_id_init(struct objspace *objspace)
1100{
1101 objspace->id_to_obj_tbl = NULL;
1102 objspace->obj_to_id_tbl = st_init_numtable();
1103 objspace->next_object_id = OBJ_ID_INITIAL;
1104}
1105
// Return (lazily assigning) the object id for `obj`. Ids are handed out
// monotonically from next_object_id, under the VM lock.
VALUE
rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
{
    VALUE id;
    struct objspace *objspace = objspace_ptr;

    unsigned int lev = rb_gc_vm_lock();
    if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
        // Already assigned: the flag guarantees a table entry exists.
        st_data_t val;
        if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &val)) {
            id = (VALUE)val;
        }
        else {
            rb_bug("rb_gc_impl_object_id: FL_SEEN_OBJ_ID flag set but not found in table");
        }
    }
    else {
        RUBY_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, NULL));

        id = ULL2NUM(objspace->next_object_id);
        objspace->next_object_id += OBJ_ID_INCREMENT;

        st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
        // Keep the reverse table in sync only once it has been materialized.
        if (RB_UNLIKELY(objspace->id_to_obj_tbl)) {
            st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
        }
        FL_SET(obj, FL_SEEN_OBJ_ID);
    }
    rb_gc_vm_unlock(lev);

    return id;
}
1138
1139static int
1140build_id_to_obj_i(st_data_t key, st_data_t value, st_data_t data)
1141{
1142 st_table *id_to_obj_tbl = (st_table *)data;
1143 st_insert(id_to_obj_tbl, value, key);
1144 return ST_CONTINUE;
1145}
1146
// ObjectSpace._id2ref: resolve an object id back to the object, building the
// reverse table on first use. Raises RangeError for unknown or recycled ids.
VALUE
rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
{
    struct objspace *objspace = objspace_ptr;


    unsigned int lev = rb_gc_vm_lock();

    // Materialize id_to_obj_tbl lazily by inverting obj_to_id_tbl.
    if (!objspace->id_to_obj_tbl) {
        objspace->id_to_obj_tbl = st_init_table_with_size(&object_id_hash_type, st_table_size(objspace->obj_to_id_tbl));
        st_foreach(objspace->obj_to_id_tbl, build_id_to_obj_i, (st_data_t)objspace->id_to_obj_tbl);
    }

    VALUE obj;
    bool found = st_lookup(objspace->id_to_obj_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);

    rb_gc_vm_unlock(lev);

    if (found) {
        return obj;
    }

    // Distinguish "never issued" from "issued but since collected".
    if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(objspace->next_object_id))) {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
    }
}
1176
// Forking

// Notify MMTk of an imminent fork(2).
void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    mmtk_before_fork();
}
1184
1185void
1186rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1187{
1188 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1189}
1190
1191// Statistics
1192
1193void
1194rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1195{
1196 struct objspace *objspace = objspace_ptr;
1197
1198 objspace->measure_gc_time = RTEST(flag);
1199}
1200
1201bool
1202rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1203{
1204 struct objspace *objspace = objspace_ptr;
1205
1206 return objspace->measure_gc_time;
1207}
1208
1209unsigned long long
1210rb_gc_impl_get_total_time(void *objspace_ptr)
1211{
1212 struct objspace *objspace = objspace_ptr;
1213
1214 return objspace->total_gc_time;
1215}
1216
1217size_t
1218rb_gc_impl_gc_count(void *objspace_ptr)
1219{
1220 struct objspace *objspace = objspace_ptr;
1221
1222 return objspace->gc_count;
1223}
1224
// GC.latest_gc_info. Accepts either a Symbol key (returns that one value)
// or a Hash (fills in every key). Only :state is implemented here.
VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

// Either return the value for a matching Symbol key, or store it in the Hash.
#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1258
// Keys reported by rb_gc_impl_stat (GC.stat). gc_stat_sym_last is a
// sentinel used only to size the symbol cache below.
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_last
};

// Interned Symbol for each gc_stat_sym entry, populated lazily by
// setup_gc_stat_symbols().
static VALUE gc_stat_symbols[gc_stat_sym_last];
1272
1273static void
1274setup_gc_stat_symbols(void)
1275{
1276 if (gc_stat_symbols[0] == 0) {
1277#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
1278 S(count);
1279 S(time);
1280 S(total_allocated_objects);
1281 S(total_bytes);
1282 S(used_bytes);
1283 S(free_bytes);
1284 S(starting_heap_address);
1285 S(last_heap_address);
1286 }
1287}
1288
1289VALUE
1290rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
1291{
1292 struct objspace *objspace = objspace_ptr;
1293 VALUE hash = Qnil, key = Qnil;
1294
1295 setup_gc_stat_symbols();
1296
1297 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1298 hash = hash_or_sym;
1299 }
1300 else if (SYMBOL_P(hash_or_sym)) {
1301 key = hash_or_sym;
1302 }
1303 else {
1304 rb_bug("non-hash or symbol given");
1305 }
1306
1307#define SET(name, attr) \
1308 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
1309 return SIZET2NUM(attr); \
1310 else if (hash != Qnil) \
1311 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
1312
1313 SET(count, objspace->gc_count);
1314 SET(time, objspace->total_gc_time / (1000 * 1000));
1315 SET(total_allocated_objects, objspace->total_allocated_objects);
1316 SET(total_bytes, mmtk_total_bytes());
1317 SET(used_bytes, mmtk_used_bytes());
1318 SET(free_bytes, mmtk_free_bytes());
1319 SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
1320 SET(last_heap_address, (size_t)mmtk_last_heap_address());
1321#undef SET
1322
1323 if (!NIL_P(key)) {
1324 // Matched key should return above
1325 return Qundef;
1326 }
1327
1328 return hash;
1329}
1330
1331VALUE
1332rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1333{
1334 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1335 return hash_or_sym;
1336 }
1337 else {
1338 return Qundef;
1339 }
1340}
1341
1342// Miscellaneous
1343
1344#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
1345static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1346
1348rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1349{
1350 static ID ID_object_id;
1351
1352 if (!ID_object_id) {
1353#define I(s) ID_##s = rb_intern(#s);
1354 I(object_id);
1355#undef I
1356 }
1357
1358 size_t n = 0;
1359
1360#define SET_ENTRY(na, v) do { \
1361 RUBY_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1362 object_metadata_entries[n].name = ID_##na; \
1363 object_metadata_entries[n].val = v; \
1364 n++; \
1365} while (0)
1366
1367 if (FL_TEST(obj, FL_SEEN_OBJ_ID)) SET_ENTRY(object_id, rb_obj_id(obj));
1368
1369 object_metadata_entries[n].name = 0;
1370 object_metadata_entries[n].val = 0;
1371
1372 return object_metadata_entries;
1373}
1374
1375bool
1376rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1377{
1378 if (ptr == NULL) return false;
1379 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1380 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1381}
1382
bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    // MMTk never reports an object as garbage through this interface;
    // unreachable objects are reclaimed by MMTk itself.
    return false;
}
1388
// Internal GC event hooks are not supported by this implementation: no-op.
void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1390
void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    // Propagate obj's GC-relevant attributes onto its copy dest:
    // if the source has been write-barrier-unprotected, unprotect the
    // destination as well so the barrier state stays consistent.
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    // Also carry over any finalizer registered on obj.
    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}
1400
1401// GC Identification
1402
1403const char *
1404rb_gc_impl_active_gc_name(void)
1405{
1406 return "mmtk";
1407}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
Atomic operations.
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:315
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1783
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1749
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:495
static void RB_FL_UNSET(VALUE obj, VALUE flags)
Clears the given flag(s).
Definition fl_type.h:681
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:239
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:135
#define FL_SEEN_OBJ_ID
Old name of RUBY_FL_SEEN_OBJ_ID.
Definition fl_type.h:65
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:129
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define ULL2NUM
Old name of RB_ULL2NUM.
Definition long_long.h:31
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:131
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:133
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
VALUE rb_eRangeError
RangeError exception.
Definition error.c:1434
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:104
VALUE rb_mGC
GC module.
Definition gc.c:432
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:179
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1099
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:476
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:284
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5561
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:45
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:41
Definition gc_impl.h:15
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376