Ruby 3.5.0dev (2025-08-09 revision 2a6345e957c01f4495323723c7a3d7ac0d4ac339)
mmtk.c
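/*
 * mmtk.c: a Ruby GC implementation backed by MMTk (https://www.mmtk.io),
 * loaded through Ruby's modular GC interface (gc/gc_impl.h). The functions
 * below fall into two groups: rb_mmtk_* upcalls that MMTk's Rust core invokes
 * through the ruby_upcalls table, and rb_gc_impl_* entry points that the Ruby
 * VM calls directly.
 */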
#include <pthread.h>
#include <stdbool.h>

#include "ruby/assert.h"
#include "ruby/atomic.h"
#include "ruby/debug.h"

#include "gc/gc.h"
#include "gc/gc_impl.h"
#include "gc/mmtk/mmtk.h"

#include "ccan/list/list.h"
#include "darray.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#endif

struct objspace {
    bool measure_gc_time;
    bool gc_stress;

    size_t gc_count;
    size_t total_gc_time;
    size_t total_allocated_objects;

    st_table *finalizer_table;
    struct MMTk_final_job *finalizer_jobs;
    rb_postponed_job_handle_t finalizer_postponed_job;

    struct ccan_list_head ractor_caches;
    unsigned long live_ractor_cache_count;

    pthread_mutex_t mutex;
    bool world_stopped;
    pthread_cond_t cond_world_stopped;
    pthread_cond_t cond_world_started;
    size_t start_the_world_count;

    struct rb_gc_vm_context vm_context;
};

struct MMTk_ractor_cache {
    struct ccan_list_node list_node;

    MMTk_Mutator *mutator;
    bool gc_mutator_p;
};

struct MMTk_final_job {
    struct MMTk_final_job *next;
    enum {
        MMTK_FINAL_JOB_DFREE,
        MMTK_FINAL_JOB_FINALIZE,
    } kind;
    union {
        struct {
            void (*func)(void *);
            void *data;
        } dfree;
        struct {
            /* HACK: we store the object ID in the 0th element of this array. */
            VALUE finalizer_array;
        } finalize;
    } as;
};

#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
#else
# error We currently need language-supported TLS
#endif

static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}

static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}

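/*
 * Stop-the-world handshake. rb_mmtk_stop_the_world() runs on an MMTk worker
 * thread: it waits on cond_world_stopped until the mutator chosen to trigger
 * the collection (see rb_mmtk_block_for_gc below) has run the VM barrier and
 * set world_stopped. rb_mmtk_resume_mutators() reverses this by clearing
 * world_stopped, bumping gc_count, and broadcasting cond_world_started.
 */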
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}

static void
rb_mmtk_resume_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}

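/*
 * Runs on a mutator thread that MMTk has asked to block for GC. gc_count is
 * snapshotted before taking the mutex; if another mutator already drove a
 * collection to completion in the meantime (rb_mmtk_resume_mutators bumps
 * gc_count), the stop-the-world dance is skipped entirely. total_gc_time is
 * accumulated in nanoseconds from CLOCK_MONOTONIC.
 */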
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    int lock_lev = RB_GC_VM_LOCK();
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        objspace->world_stopped = true;

        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}

static size_t
rb_mmtk_number_of_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();
    return objspace->live_ractor_cache_count;
}

static void
rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
{
    struct objspace *objspace = rb_gc_get_objspace();
    struct MMTk_ractor_cache *ractor_cache;

    ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
        visit_mutator(ractor_cache->mutator, data);
    }
}

static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}

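/*
 * The objspace's own finalizer bookkeeping is treated as a root set: values
 * in finalizer_table are marked and pinned (the table is not updated when
 * objects move; see the TODO in rb_mmtk_update_finalizer_table), and pending
 * MMTK_FINAL_JOB_FINALIZE jobs keep their finalizer arrays alive until they
 * run. DFREE jobs hold only raw C pointers, so there is nothing to mark.
 */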
static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}

static void
rb_mmtk_scan_object_ruby_style(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);
}

static size_t
rb_mmtk_vm_live_bytes(void)
{
    return 0;
}

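/*
 * Finalization is deferred rather than run during collection: a dead object
 * with finalizers is turned into an MMTk_final_job on the
 * objspace->finalizer_jobs list, and the pre-registered postponed job
 * (gc_run_finalizers) later executes it on a Ruby thread.
 */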
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    RUBY_ASSERT(RB_FL_TEST(obj, RUBY_FL_FINALIZE));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)table));
    RUBY_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    RB_FL_UNSET(obj, RUBY_FL_FINALIZE);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}

static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data)
{
    RUBY_ASSERT(RB_FL_TEST((VALUE)key, RUBY_FL_FINALIZE));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    RUBY_ASSERT(RB_BUILTIN_TYPE((VALUE)value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // TODO: replace with st_foreach_with_replace when GC is moving
    st_foreach(objspace->finalizer_table, rb_mmtk_update_finalizer_table_i, (st_data_t)objspace);
}

static int
rb_mmtk_update_table_i(VALUE val, void *data)
{
    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}

static void
rb_mmtk_update_global_tables(int table)
{
    RUBY_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(rb_mmtk_update_table_i, NULL, NULL, true, (enum rb_gc_vm_weak_tables)table);
}

static bool
rb_mmtk_special_const_p(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;

    return RB_SPECIAL_CONST_P(obj);
}

// Bootup
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_scan_object_ruby_style,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
};

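/*
 * Heap sizing defaults. rb_mmtk_system_physical_memory() is assumed to be
 * defined elsewhere in this binding (the <sys/sysctl.h> include above hints
 * at a sysctl-based implementation on macOS); by default the heap is capped
 * at 80% of physical memory.
 */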
// Use at most 80% of available physical memory by default for the MMTk heap
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};

MMTk_Builder *
rb_mmtk_builder_init(void)
{
    MMTk_Builder *builder = mmtk_builder_default();
    return builder;
}

void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls, (MMTk_ObjectReference)Qundef);

    return calloc(1, sizeof(struct objspace));
}

static void gc_run_finalizers(void *data);

void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}

void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}

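/*
 * One MMTk mutator context exists per ractor. The first cache allocation
 * also starts MMTk's collection machinery (mmtk_initialize_collection);
 * every cache is linked into objspace->ractor_caches, which backs
 * rb_mmtk_number_of_mutators() and rb_mmtk_get_mutators() above.
 */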
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = malloc(sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);

    return cache;
}

void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    RUBY_ASSERT(objspace->live_ractor_cache_count > 1);
    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}

void rb_gc_impl_set_params(void *objspace_ptr) { }

static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }

void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(640));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}

static size_t heap_sizes[6] = {
    40, 80, 160, 320, 640, 0
};

size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}

int
rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
{
    struct objspace *objspace = data;

    if (!RB_TYPE_P(obj, T_NONE)) {
        rb_gc_obj_free_vm_weak_references(obj);
        rb_gc_obj_free(objspace, obj);
    }

    return 0;
}

// Shutdown
static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);

void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}

// GC
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}

bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    // TODO
    return false;
}

static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}

void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}

void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}

void
rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->gc_stress = RTEST(flag);
}

VALUE
rb_gc_impl_stress_get(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_stress ? Qtrue : Qfalse;
}

VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}

void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}

// Object allocation

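/*
 * Heap object layout used by this implementation: each object is allocated
 * with one extra leading VALUE that records the slot size, so the payload
 * (what the VM sees as the object) starts one word in:
 *
 *     [slot size][flags][klass][v1][v2][v3]...
 *                 ^-- (VALUE)obj points here
 *
 * This is why rb_gc_impl_new_obj() allocates alloc_size + 8 bytes and bumps
 * the returned pointer, and why rb_gc_impl_obj_slot_size() reads index -1.
 * (The literal 8 assumes a 64-bit VALUE.)
 */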
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > 640) rb_bug("too big");
    for (int i = 0; i < 5; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    VALUE *alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    alloc_obj++;
    alloc_obj[-1] = alloc_size;
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;
    if (alloc_size > 16) alloc_obj[2] = v1;
    if (alloc_size > 24) alloc_obj[3] = v2;
    if (alloc_size > 32) alloc_obj[4] = v3;

    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size + 8, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_add_obj_free_candidate(alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}

size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return ((VALUE *)obj)[-1];
}

size_t
rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
{
    for (int i = 0; i < 5; i++) {
        if (size == heap_sizes[i]) return i;
        if (size < heap_sizes[i]) return i;
    }

    rb_bug("size too big");
}

bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size <= 640;
}

// Malloc
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system malloc
    return malloc(size);
}

void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system calloc
    return calloc(1, size);
}

void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
{
    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}

void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}

void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }

// Marking
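/*
 * rb_gc_impl_mark() does not mark objects itself: it forwards each reference
 * to the Rust-side ObjectClosure that MMTk installed into this worker
 * thread's TLS (see rb_mmtk_init_gc_worker_thread), so it is presumably only
 * meaningful while a tracing closure is active on the current thread.
 */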
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_gc_thread_tls->object_closure.c_function(rb_mmtk_gc_thread_tls->object_closure.rust_closure,
                                                     rb_mmtk_gc_thread_tls->gc_context,
                                                     (MMTk_ObjectReference)obj,
                                                     false);
}

void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    // TODO: make it movable
    rb_gc_impl_mark(objspace_ptr, *ptr);
}

void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    // TODO: also pin
    rb_gc_impl_mark(objspace_ptr, obj);
}

void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}

void
rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
{
    mmtk_mark_weak((MMTk_ObjectReference *)ptr);
}

void
rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
{
    mmtk_remove_weak((MMTk_ObjectReference *)ptr);
}

// Compaction
bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    rb_bug("unimplemented");
}

VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE value)
{
    rb_bug("unimplemented");
}

// Write barriers
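/*
 * MMTk uses an object-remembering post-write barrier here: only the parent
 * object `a` is reported through mmtk_object_reference_write_post(), while
 * the written value `b` is consulted just to skip the barrier for special
 * constants.
 */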
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    if (SPECIAL_CONST_P(b)) return;

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}

void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
}

void
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
}

// Heap walking
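/*
 * Heap walks operate on a snapshot: mmtk_enumerate_objects() first copies
 * every object reference into a darray, and the callback only runs
 * afterwards. The mmtk_is_mmtk_object() re-check presumably filters out
 * entries that stopped being valid objects while the walk mutated the heap
 * (e.g. during rb_gc_impl_shutdown_free_objects).
 */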
static void
each_objects_i(MMTk_ObjectReference obj, void *d)
{
    rb_darray(VALUE) *objs = d;

    rb_darray_append(objs, (VALUE)obj);
}

static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}

struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);
    void *data;
};

static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}

void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}

struct rb_gc_impl_each_object_data {
    void (*func)(VALUE, void *);
    void *data;
};

static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}

void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}

// Finalizers
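/*
 * Layout of a finalizer array (see the HACK note in struct MMTk_final_job):
 * element 0 holds the target's object ID and the finalizer procs start at
 * index 1, hence the i + 1 below and the "- 1" on RARRAY_LEN in
 * gc_run_finalizers.
 */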
static VALUE
gc_run_finalizers_get_final(long i, void *data)
{
    VALUE table = (VALUE)data;

    return RARRAY_AREF(table, i + 1);
}

static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),
                RARRAY_LEN(finalizer_array) - 1,
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}

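/*
 * Jobs are pushed onto objspace->finalizer_jobs with a lock-free CAS retry
 * loop, so concurrent callers never block. When every object will be freed
 * at exit anyway, the postponed job is not triggered;
 * rb_gc_impl_shutdown_call_finalizer drains the queue instead.
 */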
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}

VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    int lev = RB_GC_VM_LOCK();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate blocks; the table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    RB_GC_VM_UNLOCK(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(2, rb_obj_id(obj), block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    RB_GC_VM_UNLOCK(lev);

    return block;
}

void
rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;

    st_data_t data = obj;

    int lev = RB_GC_VM_LOCK();
    st_delete(objspace->finalizer_table, &data, 0);
    RB_GC_VM_UNLOCK(lev);

    FL_UNSET(obj, FL_FINALIZE);
}

void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = rb_ary_dup((VALUE)data);
        RARRAY_ASET(table, 0, rb_obj_id(dest));
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
    RB_GC_VM_UNLOCK(lev);
}

static int
move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct objspace *objspace = (struct objspace *)arg;

    make_final_job(objspace, (VALUE)key, (VALUE)val);

    return ST_DELETE;
}

void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
    for (size_t i = 0; i < registered_candidates.len; i++) {
        VALUE obj = (VALUE)registered_candidates.ptr[i];

        if (rb_gc_shutdown_call_finalizer_p(obj)) {
            rb_gc_obj_free(objspace_ptr, obj);
            RBASIC(obj)->flags = 0;
        }
    }
    mmtk_free_raw_vec_of_obj_ref(registered_candidates);

    gc_run_finalizers(objspace);
}

// Forking

void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    mmtk_before_fork();
}

void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
}

// Statistics

void
rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = RTEST(flag);
}

bool
rb_gc_impl_get_measure_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->measure_gc_time;
}

unsigned long long
rb_gc_impl_get_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->total_gc_time;
}

size_t
rb_gc_impl_gc_count(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_count;
}

VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // A matched key should have returned above
        return Qundef;
    }

    return hash;
}

enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];

static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
    }
}

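/*
 * GC.stat: :time is reported in milliseconds (total_gc_time is accumulated
 * in nanoseconds and divided by 1e6 below), while the byte counts and heap
 * addresses come straight from MMTk.
 */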
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
#undef SET

    if (!NIL_P(key)) {
        // A matched key should have returned above
        return Qundef;
    }

    return hash;
}

VALUE
rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
{
    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        return hash_or_sym;
    }
    else {
        return Qundef;
    }
}

// Miscellaneous

#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];

struct rb_gc_object_metadata_entry *
rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
{
    static ID ID_object_id;

    if (!ID_object_id) {
#define I(s) ID_##s = rb_intern(#s);
        I(object_id);
#undef I
    }

    size_t n = 0;

#define SET_ENTRY(na, v) do { \
    RUBY_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
    object_metadata_entries[n].name = ID_##na; \
    object_metadata_entries[n].val = v; \
    n++; \
} while (0)

    if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));

    object_metadata_entries[n].name = 0;
    object_metadata_entries[n].val = 0;

    return object_metadata_entries;
}
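
/*
 * NULL and misaligned pointers are rejected before asking MMTk whether the
 * address is a live object start. rb_gc_impl_mark_maybe() above relies on
 * this check to conservatively filter arbitrary words (e.g. off the machine
 * stack) down to real heap objects.
 */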
bool
rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
{
    if (ptr == NULL) return false;
    if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
    return mmtk_is_mmtk_object((MMTk_Address)ptr);
}

bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    return false;
}

void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }

void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}

// GC Identification

const char *
rb_gc_impl_active_gc_name(void)
{
    return "mmtk";
}