Ruby 4.0.0dev (2025-11-29 revision 82b91ec7e55cb5ef4acd61213843614542bea3b3)
mmtk.c
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
/* Per-VM GC state for the MMTk backend. */
struct objspace {
    bool measure_gc_time;   /* when true, accumulate pause time into total_gc_time */
    bool gc_stress;         /* when true, force a collection on every allocation */

    size_t gc_count;                 /* completed GC cycles */
    size_t total_gc_time;            /* accumulated stop-the-world time, in nanoseconds */
    size_t total_allocated_objects;  /* lifetime allocation counter */

    st_table *finalizer_table;                         /* obj -> finalizer array (rb_gc_impl_define_finalizer) */
    struct MMTk_final_job *finalizer_jobs;             /* pending dfree/finalize jobs (singly-linked stack) */
    rb_postponed_job_handle_t finalizer_postponed_job; /* runs gc_run_finalizers on a mutator thread */

    struct ccan_list_head ractor_caches;   /* live MMTk_ractor_cache nodes */
    unsigned long live_ractor_cache_count;

    /* Stop-the-world handshake between mutator threads and GC threads. */
    pthread_mutex_t mutex;
    rb_atomic_t mutator_blocking_count; /* mutators blocking for GC; read by the fork hooks */
    bool world_stopped;
    pthread_cond_t cond_world_stopped;  /* signaled when the world has been stopped */
    pthread_cond_t cond_world_started;  /* signaled when mutators may resume */
    size_t start_the_world_count;

    struct rb_gc_vm_context vm_context; /* EC context lent to GC worker threads */

    unsigned int fork_hook_vm_lock_lev; /* VM lock level saved between before_fork/after_fork */
};
45
47 struct ccan_list_node list_node;
48
49 MMTk_Mutator *mutator;
50 bool gc_mutator_p;
51};
52
54 struct MMTk_final_job *next;
55 enum {
56 MMTK_FINAL_JOB_DFREE,
57 MMTK_FINAL_JOB_FINALIZE,
58 } kind;
59 union {
60 struct {
61 void (*func)(void *);
62 void *data;
63 } dfree;
64 struct {
65 /* HACK: we store the object ID on the 0th element of this array. */
66 VALUE finalizer_array;
67 } finalize;
68 } as;
69};
70
71#ifdef RB_THREAD_LOCAL_SPECIFIER
72RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
73#else
74# error We currently need language-supported TLS
75#endif
76
77#include <pthread.h>
78
/* Upcall: record the MMTk GC worker's TLS pointer for the current thread. */
static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}

/* Upcall: true when the current thread is a Ruby (mutator) thread. */
static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}
90
/* Upcall (GC thread): block until a mutator has stopped the world.
 * The world is actually stopped by rb_mmtk_block_for_gc, which sets
 * world_stopped and broadcasts cond_world_stopped. */
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    /* Condition-variable wait loop guards against spurious wakeups. */
    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
109
/* Upcall (GC thread): GC is done — restart the world.  Bumps gc_count so
 * mutators queued behind the VM lock in rb_mmtk_block_for_gc can tell this
 * cycle already completed, then wakes the coordinating mutator. */
static void
rb_mmtk_resume_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
128
/* Upcall (mutator thread): a GC has been requested; stop this mutator and,
 * if it is the first one in, coordinate the stop-the-world handshake. */
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    /* Advertise that we may block inside RB_GC_VM_LOCK so the trylock loop in
     * rb_gc_impl_before_fork can detect us and back off (deadlock avoidance). */
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    /* If gc_count advanced while we waited for the VM lock, another mutator
     * already drove this GC cycle to completion; nothing left to do. */
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        /* This mutator coordinates the cycle. */
        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        /* Wake the GC threads parked in rb_mmtk_stop_the_world. */
        objspace->world_stopped = true;

        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            /* Pause time is accumulated in nanoseconds. */
            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
183
/* Upcall: number of live mutators (one per ractor cache). */
static size_t
rb_mmtk_number_of_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();
    return objspace->live_ractor_cache_count;
}

/* Upcall: invoke `visit_mutator` on every bound MMTk mutator. */
static void
rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
{
    struct objspace *objspace = rb_gc_get_objspace();
    struct MMTk_ractor_cache *ractor_cache;

    ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
        visit_mutator(ractor_cache->mutator, data);
    }
}
201
/* Upcall (GC worker): mark the VM's root set. */
static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}

/* st_foreach callback: mark-and-pin a table value (data is the objspace). */
static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}
221
/* Upcall (GC worker): mark GC-internal references — finalizer arrays
 * registered in finalizer_table (pinned) and those held by queued
 * finalize jobs that have not run yet. */
static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            /* dfree jobs hold no Ruby objects — nothing to mark. */
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}
246
/* Upcall: mark the children of one object.  Both entries in ruby_upcalls
 * currently share the same implementation. */
static void
rb_mmtk_scan_object_ruby_style(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

/* Upcall: mark the children of one object (see rb_mmtk_scan_object_ruby_style). */
static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}
258
/* Upcall (GC worker): run obj_free for a dead object, firing the FREEOBJ
 * event hook first when anyone is listening. */
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        /* Event hooks need an EC; lend the saved VM context to this worker. */
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);
}
273
/* Upcall: extra VM-owned live bytes to report to MMTk. Not tracked yet,
 * so always zero. */
static size_t
rb_mmtk_vm_live_bytes(void)
{
    size_t live_bytes = 0;
    return live_bytes;
}
279
/* Queue a finalize job for dying object `obj`; `table` is its finalizer
 * array (element 0 holds the object ID, the rest the finalizer procs).
 * NOTE(review): `obj` is unused in the visible body — presumably consumed
 * only by assertions; confirm against upstream. */
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)table));


    /* Push onto the pending-jobs list; gc_run_finalizers pops and frees. */
    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}

/* st_foreach callback run after marking: for every registered finalizer
 * whose target died, queue its finalize job, schedule the postponed job
 * that runs finalizers on a mutator thread, and drop the table entry. */
static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data)
{
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));

    struct objspace *objspace = (struct objspace *)data;

    if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}
316
/* Upcall: sweep finalizer_table, queueing jobs for dead objects. */
static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // TODO: replace with st_foreach_with_replace when GC is moving
    st_foreach(objspace->finalizer_table, rb_mmtk_update_finalizer_table_i, (st_data_t)objspace);
}

/* Weak-table callback: drop entries whose object died this cycle. */
static int
rb_mmtk_update_table_i(VALUE val, void *data)
{
    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    return ST_CONTINUE;
}
335
/* Upcall: how many VM-global weak tables exist (iterated by index). */
static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}

/* Upcall: prune dead entries from one VM-global weak table. */
static void
rb_mmtk_update_global_tables(int table)
{
    RUBY_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(rb_mmtk_update_table_i, NULL, NULL, true, (enum rb_gc_vm_weak_tables)table);
}
349
350static bool
351rb_mmtk_special_const_p(MMTk_ObjectReference object)
352{
353 VALUE obj = (VALUE)object;
354
355 return RB_SPECIAL_CONST_P(obj);
356}
357
// Bootup
/* VM callback table handed to the MMTk Rust binding via mmtk_init_binding.
 * Entry order must match the MMTk_RubyUpcalls struct layout. */
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_scan_object_ruby_style,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
};
378
// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

/* Heap sizing policy: grow/shrink dynamically, or a fixed maximum. */
enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};
388
/* Create an MMTk builder with default options (plan, heap size, etc.). */
MMTk_Builder *
rb_mmtk_builder_init(void)
{
    MMTk_Builder *builder = mmtk_builder_default();
    return builder;
}

/* GC API: allocate and zero the objspace, initializing the MMTk binding
 * first (Qundef serves as the "weak reference cleared" sentinel). */
void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls, (MMTk_ObjectReference)Qundef);

    /* NOTE(review): calloc result is returned unchecked — callers presumably
     * treat NULL as fatal; confirm. */
    return calloc(1, sizeof(struct objspace));
}
404
405static void gc_run_finalizers(void *data);
406
/* GC API: initialize a freshly-allocated (zeroed) objspace. */
void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    /* Finalizers must run on a mutator thread; preregister the job here. */
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    /* Static initializers assigned via compound literals — equivalent to
     * pthread_*_init with default attributes. */
    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}
423
/* GC API: release the objspace allocated by rb_gc_impl_objspace_alloc.
 * The struct was obtained from calloc, so plain free matches. */
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;
    free(objspace);
}
429
/* GC API: create the per-ractor allocation cache and bind an MMTk mutator
 * to it.  The first cache also starts MMTk's collector threads. */
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        /* First ractor: spin up MMTk's GC worker threads. */
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    /* NOTE(review): malloc result is used unchecked — OOM here would crash. */
    struct MMTk_ractor_cache *cache = malloc(sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);

    return cache;
}

/* GC API: tear down a ractor's cache when the ractor exits.
 * NOTE(review): the assertion requires count > 1, i.e. the last cache is
 * apparently never freed through this path — confirm; also the cache
 * struct itself is not free()d here (presumably MMTk may still reference
 * it) — verify against the binding's ownership rules. */
void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    RUBY_ASSERT(objspace->live_ractor_cache_count > 1);
    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}
460
/* GC API: environment-variable GC tuning is not supported by this backend. */
void rb_gc_impl_set_params(void *objspace_ptr) { }

/* GC.verify_internal_consistency: compatibility no-op, always nil. */
static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
464
/* GC API: define Ruby-visible GC constants and compatibility singletons.
 * The INTERNAL_CONSTANTS values mimic the default GC's layout so tooling
 * that reads them keeps working. */
void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(640));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    /* Compaction is not implemented by the MMTk backend. */
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
487
/* Allocation size classes in bytes, ascending; 0 terminates the list. */
static size_t heap_sizes[6] = {
    40, 80, 160, 320, 640, 0
};

/* GC API: expose the size-class table (zero-terminated). */
size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}
497
498int
499rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
500{
501 struct objspace *objspace = data;
502
503 if (!RB_TYPE_P(obj, T_NONE)) {
504 rb_gc_obj_free_vm_weak_references(obj);
505 rb_gc_obj_free(objspace, obj);
506 }
507
508 return 0;
509}
510
511// Shutdown
512static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
513
/* GC API: free every live object at VM shutdown.  GC is disabled around
 * the walk so freeing cannot trigger a collection mid-iteration. */
void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}

// GC
/* GC API: explicitly request a collection (GC.start). */
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    /* immediate_mark/immediate_sweep/compact are not meaningful for MMTk. */
    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}
528
/* GC API: whether a collection is currently in progress.
 * TODO: report the real phase from MMTk; for now always false. */
bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    bool during_gc = false;
    return during_gc;
}
535
/* Enumeration callback for rb_gc_impl_prepare_heap. */
static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}

/* GC API: pre-fork heap preparation — process every live object. */
void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}
547
/* GC API: GC.enable. */
void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

/* GC API: GC.disable.  `finish_current_gc` is ignored by this backend. */
void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

/* GC API: whether automatic collection is currently enabled. */
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}
565
/* GC API: GC.stress= — when truthy, every allocation forces a collection
 * (see rb_gc_impl_new_obj). */
void
rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->gc_stress = RTEST(flag);
}

/* GC API: GC.stress. */
VALUE
rb_gc_impl_stress_get(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_stress ? Qtrue : Qfalse;
}
581
/* GC API: GC.config — report MMTk's runtime configuration as a Hash. */
VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    /* heap_min is only meaningful (non-zero) in dynamic heap mode. */
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}

/* GC API: GC.config= — not supported yet; settings are silently ignored. */
void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}
602
603// Object allocation
604
/* GC API: allocate a new object of `alloc_size` payload bytes, rounded up
 * to the nearest size class.  The real allocation is 8 bytes larger: the
 * slot size is stored in the word immediately before the object and read
 * back by rb_gc_impl_obj_slot_size.
 * NOTE(review): the 8-byte prefix assumes sizeof(VALUE) == 8 — confirm
 * whether 32-bit targets are supported. */
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > 640) rb_bug("too big");
    /* Round the request up to the matching heap_sizes entry. */
    for (int i = 0; i < 5; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        /* GC.stress: force a collection before every allocation. */
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    VALUE *alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    alloc_obj++;                /* object body starts after the size prefix */
    alloc_obj[-1] = alloc_size; /* record slot size for rb_gc_impl_obj_slot_size */
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;

    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size + 8, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_add_obj_free_candidate(alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
640
/* GC API: slot size of an object, read from the hidden word stored just
 * before it by rb_gc_impl_new_obj. */
size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return ((VALUE *)obj)[-1];
}
646
647size_t
648rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
649{
650 for (int i = 0; i < 5; i++) {
651 if (size == heap_sizes[i]) return i;
652 if (size < heap_sizes[i]) return i;
653 }
654
655 rb_bug("size too big");
656}
657
/* GC API: can an object of `size` bytes be heap-allocated?
 * 640 is the largest size class (see heap_sizes). */
bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    enum { RB_MMTK_MAX_ALLOC_SIZE = 640 };
    return size <= RB_MMTK_MAX_ALLOC_SIZE;
}
663
664// Malloc
/* GC-aware malloc.  Currently just the system allocator; `objspace_ptr`
 * and `gc_allowed` are unused until allocations are routed through MMTk. */
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system malloc
    void *mem = malloc(size);
    return mem;
}
671
/* GC-aware calloc: zero-initialized allocation of `size` bytes.
 * Currently just the system allocator. */
void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system calloc
    void *mem = calloc(1, size);
    return mem;
}
678
/* GC-aware realloc; `old_size` is unused by the system-allocator path. */
void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    // TODO: don't use system realloc
    void *mem = realloc(ptr, new_size);
    return mem;
}
685
/* GC-aware free; `old_size` is unused by the system-allocator path. */
void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}
692
/* GC API: off-GC-heap memory usage hint; unused by the MMTk backend. */
void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }

// Marking
/* GC API: mark one object.  Runs on a GC worker thread — forwards the
 * reference to the worker's MMTk object closure (set up in
 * rb_mmtk_init_gc_worker_thread); `false` = do not pin. */
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_gc_thread_tls->object_closure.c_function(rb_mmtk_gc_thread_tls->object_closure.rust_closure,
                                                     rb_mmtk_gc_thread_tls->gc_context,
                                                     (MMTk_ObjectReference)obj,
                                                     false);
}
706
/* GC API: mark *ptr and update it if the object moved.  The backend does
 * not move objects yet, so this is a plain mark. */
void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    // TODO: make it movable
    rb_gc_impl_mark(objspace_ptr, *ptr);
}

/* GC API: mark an object and prevent it from moving.  Pinning is a no-op
 * while the backend is non-moving. */
void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    // TODO: also pin
    rb_gc_impl_mark(objspace_ptr, obj);
}
724
/* GC API: conservatively mark `obj` if it looks like a heap object
 * (used for values found on the machine stack). */
void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}

/* GC API: register *ptr as a weak reference, cleared by MMTk if the
 * referent dies. */
void
rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
{
    mmtk_mark_weak((MMTk_ObjectReference *)ptr);
}

/* GC API: unregister a weak reference previously passed to mark_weak. */
void
rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
{
    mmtk_remove_weak((MMTk_ObjectReference *)ptr);
}
744
745// Compaction
/* GC API: compaction query — the MMTk backend never moves objects, so
 * reaching here is a bug. */
bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    rb_bug("unimplemented");
}

/* GC API: new location of a moved object — unreachable (non-moving GC). */
VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE value)
{
    rb_bug("unimplemented");
}
757
758// Write barriers
759void
760rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
761{
762 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
763
764 if (SPECIAL_CONST_P(b)) return;
765
766 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
767}
768
769void
770rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
771{
772 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
773}
774
775void
776rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
777{
778 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
779
780 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
781}
782
783// Heap walking
/* mmtk_enumerate_objects callback: collect every object into a darray. */
static void
each_objects_i(MMTk_ObjectReference obj, void *d)
{
    rb_darray(VALUE) *objs = d;

    rb_darray_append(objs, (VALUE)obj);
}

/* Walk all live objects, calling `func(obj, data)` until it returns
 * non-zero.  Objects are snapshotted into an array first so `func` may
 * free objects without invalidating the iteration. */
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        /* Skip objects freed by an earlier callback in this same walk. */
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
811
813 int (*func)(void *, void *, size_t, void *);
814 void *data;
815};
816
/* Adapter: convert a (start, end, slot_size) page-style callback into the
 * simple per-object callback used by each_object. */
static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    /* Each object is presented as its own one-slot "page". */
    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}

/* GC API: ObjectSpace-style heap walk with slot boundaries. */
void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}
837
839 void (*func)(VALUE, void *);
840 void *data;
841};
842
/* Adapter: wrap a void-returning visitor for each_object (never stops
 * the iteration early). */
static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}

/* GC API: visit every live object with `func(obj, data)`. */
void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}
863
864// Finalizers
// Finalizers
/* Fetch the i-th finalizer proc from a finalizer array.  Element 0 holds
 * the object ID, so procs start at index 1. */
static VALUE
gc_run_finalizers_get_final(long i, void *data)
{
    VALUE table = (VALUE)data;

    return RARRAY_AREF(table, i + 1);
}
872
/* Postponed-job body: drain the pending finalizer-job list, running dfree
 * callbacks and Ruby-level finalizer procs on a mutator thread.
 * Interrupt checks are deferred so finalizers cannot be interrupted. */
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    /* Re-read the list head each iteration: running a finalizer can queue
     * further jobs. */
    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),     /* object ID */
                RARRAY_LEN(finalizer_array) - 1,     /* number of procs */
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
908
/* GC API: defer an object's dfree callback to run on a mutator thread.
 * May be called from GC worker threads, so the job is pushed onto the
 * shared list with a CAS retry loop. */
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    /* Lock-free push: retry until no other thread raced the head. */
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    /* At exit the shutdown path drains jobs itself; skip the postponed job. */
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
931
/* GC API: ObjectSpace.define_finalizer — register `block` to run when
 * `obj` dies.  Returns the registered block (or the existing equal one).
 * The finalizer array layout is [object_id, proc, proc, ...]. */
VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    int lev = RB_GC_VM_LOCK();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    RB_GC_VM_UNLOCK(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        /* First finalizer for obj: element 0 caches the object ID so the
         * ID survives the object's death. */
        table = rb_ary_new3(2, rb_obj_id(obj), block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    RB_GC_VM_UNLOCK(lev);

    return block;
}
972
/* GC API: ObjectSpace.undefine_finalizer — drop all finalizers for obj. */
void
rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;

    st_data_t data = obj;

    int lev = RB_GC_VM_LOCK();
    st_delete(objspace->finalizer_table, &data, 0);
    RB_GC_VM_UNLOCK(lev);

    FL_UNSET(obj, FL_FINALIZE);
}
986
/* GC API: when `obj` is duplicated into `dest`, copy its finalizers too
 * (with `dest`'s own object ID in slot 0). */
void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = rb_ary_dup((VALUE)data);
        RARRAY_ASET(table, 0, rb_obj_id(dest));
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
    RB_GC_VM_UNLOCK(lev);
}
1008
1009static int
1010move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1011{
1012 struct objspace *objspace = (struct objspace *)arg;
1013
1014 make_final_job(objspace, (VALUE)key, (VALUE)val);
1015
1016 return ST_DELETE;
1017}
1018
/* GC API: at VM shutdown, run every outstanding finalizer and free the
 * objects that require obj_free. */
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    /* Loop because finalizers themselves may register new finalizers. */
    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* Free remaining objects whose types demand obj_free at exit. */
        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                /* Mark the slot as empty (T_NONE) so it is not freed twice. */
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    /* obj_free above may have queued dfree jobs; drain them. */
    gc_run_finalizers(objspace);
}
1047
1048// Forking
1049
/* GC API: pre-fork hook.  Takes the VM lock (held across the fork and
 * released in rb_gc_impl_after_fork) and quiesces MMTk's worker threads. */
void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

  retry:
    objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
    rb_gc_vm_barrier();

    /* At this point, we know that all the Ractors are paused because of the
     * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
     * one or more Ractors could be paused there. However, mmtk_before_fork is
     * not compatible with that because it assumes that the MMTk workers are idle,
     * but the workers are not idle because they are busy working on a GC.
     *
     * This essentially implements a trylock. It will optimistically lock but will
     * release the lock if it detects that any other Ractors are waiting in
     * rb_mmtk_block_for_gc.
     */
    rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
    if (mutator_blocking_count != 0) {
        RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
        goto retry;
    }

    mmtk_before_fork();
}

/* GC API: post-fork hook (parent and child).  Restarts MMTk's workers and
 * releases the VM lock taken in rb_gc_impl_before_fork. */
void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    struct objspace *objspace = objspace_ptr;

    mmtk_after_fork(rb_gc_get_ractor_newobj_cache());

    RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
}
1087
1088// Statistics
1089
/* GC API: GC.measure_total_time= — toggle pause-time accounting. */
void
rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = RTEST(flag);
}

/* GC API: GC.measure_total_time. */
bool
rb_gc_impl_get_measure_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->measure_gc_time;
}

/* GC API: GC.total_time — accumulated pause time in nanoseconds. */
unsigned long long
rb_gc_impl_get_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->total_gc_time;
}

/* GC API: GC.count — number of completed collections. */
size_t
rb_gc_impl_gc_count(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_count;
}
1121
/* GC API: GC.latest_gc_info.  Accepts a Symbol key (returns that value,
 * or Qundef when unknown) or a Hash (fills it in and returns it). */
VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1155
/* Keys reported by GC.stat for the MMTk backend. */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_last  /* sentinel: number of stat keys */
};

/* Interned Symbol per stat key; filled lazily by setup_gc_stat_symbols. */
static VALUE gc_stat_symbols[gc_stat_sym_last];
1169
/* Intern the GC.stat key symbols on first use (slot 0 == 0 means the
 * table has not been populated yet). */
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
    }
}
1185
/* GC API: GC.stat.  Accepts a Hash to fill or a Symbol key to fetch one
 * value (Qundef for unknown keys). */
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    /* total_gc_time is kept in nanoseconds; report milliseconds. */
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1227
1228VALUE
1229rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1230{
1231 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1232 return hash_or_sym;
1233 }
1234 else {
1235 return Qundef;
1236 }
1237}
1238
1239// Miscellaneous
1240
1241#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
1242static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1243
1245rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1246{
1247 static ID ID_object_id;
1248
1249 if (!ID_object_id) {
1250#define I(s) ID_##s = rb_intern(#s);
1251 I(object_id);
1252#undef I
1253 }
1254
1255 size_t n = 0;
1256
1257#define SET_ENTRY(na, v) do { \
1258 RUBY_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1259 object_metadata_entries[n].name = ID_##na; \
1260 object_metadata_entries[n].val = v; \
1261 n++; \
1262} while (0)
1263
1264 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1265
1266 object_metadata_entries[n].name = 0;
1267 object_metadata_entries[n].val = 0;
1268
1269 return object_metadata_entries;
1270}
1271
/* GC API: does `ptr` point at a live MMTk-managed object?  Cheap rejects
 * (NULL, misaligned) happen before asking MMTk. */
bool
rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
{
    if (ptr == NULL) return false;
    if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
    return mmtk_is_mmtk_object((MMTk_Address)ptr);
}

/* GC API: is obj dead-but-unswept?  MMTk frees eagerly, so never. */
bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    return false;
}
1285
/* GC API: event-hook bookkeeping — nothing to cache for this backend. */
void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }

/* GC API: propagate GC-tracked attributes (write-barrier protection
 * status and finalizers) from `obj` to its copy `dest`. */
void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}
1297
1298// GC Identification
1299
/* GC API: name of the active GC implementation (reported by Ruby). */
const char *
rb_gc_impl_active_gc_name(void)
{
    static const char gc_name[] = "mmtk";
    return gc_name;
}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
Atomic operations.
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
Definition atomic.h:214
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:365
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
Definition atomic.h:223
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1791
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1757
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:489
static void RB_FL_UNSET(VALUE obj, VALUE flags)
Clears the given flag(s).
Definition fl_type.h:675
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:238
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:134
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:130
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:132
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:100
VALUE rb_mGC
GC module.
Definition gc.c:424
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:177
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1513
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:792
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:284
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5771
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:45
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:41
Ruby object's base components.
Definition rbasic.h:69
Definition gc_impl.h:15
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376