Ruby 4.1.0dev (2026-03-06 revision 9aca729140424bbf465c11ab8ab53e5cc6602c01)
mmtk.c
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
struct objspace {
    bool measure_gc_time;   /* when true, accumulate pause time into total_gc_time */
    bool gc_stress;         /* when true, request a collection on every allocation */

    size_t gc_count;                 /* number of completed GC cycles */
    size_t total_gc_time;            /* cumulative stop-the-world time, in nanoseconds */
    size_t total_allocated_objects;  /* lifetime allocation counter */

    st_table *finalizer_table;                         /* obj -> hidden array of finalizer procs */
    struct MMTk_final_job *finalizer_jobs;             /* intrusive stack of pending dfree/finalize jobs */
    rb_postponed_job_handle_t finalizer_postponed_job; /* drains finalizer_jobs on a mutator thread */

    struct ccan_list_head ractor_caches;    /* all live per-ractor mutator caches */
    unsigned long live_ractor_cache_count;

    /* Stop-the-world handshake between mutators and the MMTk GC thread. */
    pthread_mutex_t mutex;
    rb_atomic_t mutator_blocking_count;
    bool world_stopped;                  /* guarded by mutex */
    pthread_cond_t cond_world_stopped;   /* broadcast when world_stopped becomes true */
    pthread_cond_t cond_world_started;   /* broadcast when world_stopped becomes false */
    size_t start_the_world_count;

    /* Crash info recorded by a GC thread so a mutator can rb_bug with it. */
    struct {
        bool gc_thread_crashed;
        char crash_msg[256];
    } crash_context;

    struct rb_gc_vm_context vm_context;

    unsigned int fork_hook_vm_lock_lev;
};
50
51#define OBJ_FREE_BUF_CAPACITY 128
52
54 struct ccan_list_node list_node;
55
56 MMTk_Mutator *mutator;
57 bool gc_mutator_p;
58
59 MMTk_BumpPointer *bump_pointer;
60
61 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
62 size_t obj_free_parallel_count;
63 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
64 size_t obj_free_non_parallel_count;
65};
66
68 struct MMTk_final_job *next;
69 enum {
70 MMTK_FINAL_JOB_DFREE,
71 MMTK_FINAL_JOB_FINALIZE,
72 } kind;
73 union {
74 struct {
75 void (*func)(void *);
76 void *data;
77 } dfree;
78 struct {
79 /* HACK: we store the object ID on the 0th element of this array. */
80 VALUE finalizer_array;
81 } finalize;
82 } as;
83};
84
85#ifdef RB_THREAD_LOCAL_SPECIFIER
86RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
87
88RB_THREAD_LOCAL_SPECIFIER VALUE marking_parent_object;
89#else
90# error We currently need language-supported TLS
91#endif
92
93#ifdef MMTK_DEBUG
94# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
95#else
96# define MMTK_ASSERT(expr, ...) ((void)0)
97#endif
98
99#include <pthread.h>
100
101static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
102
/* MMTk upcall: stash the GC worker thread's TLS pointer so later upcalls on
 * this thread can reach its object closure and GC context. */
static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}
108
/* MMTk upcall: report whether the calling thread is a Ruby (mutator) thread. */
static bool
rb_mmtk_is_mutator(void)
{
    bool is_ruby_thread = ruby_native_thread_p();
    return is_ruby_thread;
}
114
/* MMTk upcall (GC thread): wait until a mutator has stopped the world.
 * The stopping itself happens in rb_mmtk_block_for_gc; here we only wait for
 * world_stopped to become true under objspace->mutex. */
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    /* Loop to tolerate spurious condvar wakeups. */
    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
133
/* MMTk upcall (GC thread): end the stop-the-world phase.  Clears
 * world_stopped, bumps the cycle counter, and wakes the mutator blocked in
 * rb_mmtk_block_for_gc via cond_world_started. */
static void
rb_mmtk_resume_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
152
153static void mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache);
154
/* MMTk upcall (mutator thread): park this mutator for a GC cycle.
 * The first mutator to take the VM lock for a given gc_count becomes the
 * "GC mutator": it flushes obj_free buffers, flips world_stopped (waking the
 * GC thread in rb_mmtk_stop_the_world), then waits for the GC thread to
 * resume the world.  Later arrivals see gc_count already advanced and fall
 * straight through. */
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    /* mutator_blocking_count brackets the VM-lock acquisition so other code
     * can tell how many mutators are stalled here. */
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    /* Only run the handshake if no other mutator already drove this cycle. */
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        /* Hand all buffered obj_free candidates to MMTk before marking. */
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        objspace->world_stopped = true;

        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        /* If the GC thread hit rb_mmtk_gc_thread_bug, re-raise on a mutator
         * thread where rb_bug can produce a useful report. */
        if (RB_UNLIKELY(objspace->crash_context.gc_thread_crashed)) {
            rb_bug("%s", objspace->crash_context.crash_msg);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
218
/* MMTk upcall: notify the VM just before the GC rewrites JIT-compiled code. */
static void
rb_mmtk_before_updating_jit_code(void)
{
    rb_gc_before_updating_jit_code();
}
224
/* MMTk upcall: notify the VM after the GC has finished rewriting JIT code. */
static void
rb_mmtk_after_updating_jit_code(void)
{
    rb_gc_after_updating_jit_code();
}
230
231static size_t
232rb_mmtk_number_of_mutators(void)
233{
234 struct objspace *objspace = rb_gc_get_objspace();
235 return objspace->live_ractor_cache_count;
236}
237
238static void
239rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
240{
241 struct objspace *objspace = rb_gc_get_objspace();
242 struct MMTk_ractor_cache *ractor_cache;
243
244 ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
245 visit_mutator(ractor_cache->mutator, data);
246 }
247}
248
/* MMTk upcall (GC thread): mark the VM root set. */
static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}
260
261static int
262pin_value(st_data_t key, st_data_t value, st_data_t data)
263{
264 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
265
266 return ST_CONTINUE;
267}
268
/* MMTk upcall: mark objspace-owned references — pin finalizer-table values
 * and mark the finalizer arrays of already-queued finalize jobs so they
 * survive until gc_run_finalizers consumes them. */
static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            /* dfree jobs hold only C pointers; nothing to mark. */
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}
293
294static void
295rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
296{
297 rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
298}
299
300static void
301rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
302{
303 VALUE object = (VALUE)mmtk_object;
304
305 if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
306 marking_parent_object = object;
307 rb_gc_update_object_references(rb_gc_get_objspace(), object);
308 marking_parent_object = 0;
309 }
310}
311
312static void
313rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
314{
315 marking_parent_object = (VALUE)object;
316 rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
317 marking_parent_object = 0;
318}
319
/* MMTk upcall: clear dead weak references held by `mmtk_object`, then (in a
 * moving GC) also forward its remaining references — this object was skipped
 * by rb_mmtk_update_object_references. */
static void
rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
{
    VALUE object = (VALUE)mmtk_object;

    marking_parent_object = object;

    rb_gc_handle_weak_references(object);

    if (moving) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }

    marking_parent_object = 0;
}
335
/* MMTk upcall: run obj_free for a dead object, firing the FREEOBJ event hook
 * first when anyone is listening.  In debug builds the slot is scrubbed so
 * stale pointers are caught early. */
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        /* Event hooks need an EC; borrow the VM context saved at GC start. */
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);

#ifdef MMTK_DEBUG
    memset((void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
#endif
}
354
/* MMTk upcall: bytes of live VM-internal (off-heap) data.  Not tracked yet,
 * so report zero. */
static size_t
rb_mmtk_vm_live_bytes(void)
{
    size_t live_bytes = 0;
    return live_bytes;
}
360
/* Push a FINALIZE job for a dead object onto objspace->finalizer_jobs.
 * The object ID needed to run the finalizers lives in element 0 of `table`
 * (see rb_gc_impl_define_finalizer), so `obj` itself is not stored.
 * NOTE(review): the `obj` parameter is currently unused — confirm intended. */
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    MMTK_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}
373
/* st_foreach_with_replace callback for the finalizer table:
 *  - key alive and moved     -> ST_REPLACE (replace_i forwards the key)
 *  - key alive and unmoved   -> ST_CONTINUE
 *  - key dead                -> queue its finalize job and ST_DELETE */
static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data, int error)
{
    MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    MMTK_ASSERT(RB_BUILTIN_TYPE(value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
        VALUE new_key_location = rb_mmtk_call_object_closure((VALUE)key, false);

        MMTK_ASSERT(RB_FL_TEST(new_key_location, RUBY_FL_FINALIZE));

        if (new_key_location != key) {
            return ST_REPLACE;
        }
    }
    else {
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        /* Run finalizers on a mutator thread once the world restarts. */
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}
401
402static int
403rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
404{
405 *key = rb_mmtk_call_object_closure((VALUE)*key, false);
406
407 return ST_CONTINUE;
408}
409
/* MMTk upcall: after marking, sweep the finalizer table — forward moved
 * keys and queue finalize jobs for dead ones. */
static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    st_foreach_with_replace(
        objspace->finalizer_table,
        rb_mmtk_update_finalizer_table_i,
        rb_mmtk_update_finalizer_table_replace_i,
        (st_data_t)objspace
    );
}
422
423static int
424rb_mmtk_global_tables_count(void)
425{
426 return RB_GC_VM_WEAK_TABLE_COUNT;
427}
428
429static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
430
431static int
432rb_mmtk_update_global_tables_i(VALUE val, void *data)
433{
434 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
435 return ST_DELETE;
436 }
437
438 // TODO: check only if in moving GC
439 if (rb_mmtk_call_object_closure(val, false) != val) {
440 return ST_REPLACE;
441 }
442
443 return ST_CONTINUE;
444}
445
446static int
447rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
448{
449 // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
450 *ptr = rb_mmtk_call_object_closure(*ptr, false);
451
452 return ST_CONTINUE;
453}
454
/* MMTk upcall: sweep/forward one VM-global weak table.  `table` indexes the
 * enum rb_gc_vm_weak_tables; when not moving, entries are only deleted. */
static void
rb_mmtk_update_global_tables(int table, bool moving)
{
    MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(
        rb_mmtk_update_global_tables_i,
        rb_mmtk_update_global_tables_replace_i,
        NULL,
        !moving,
        (enum rb_gc_vm_weak_tables)table
    );
}
468
469static bool
470rb_mmtk_special_const_p(MMTk_ObjectReference object)
471{
472 VALUE obj = (VALUE)object;
473
474 return RB_SPECIAL_CONST_P(obj);
475}
476
/* Fatal-error path for GC threads: rb_bug needs a Ruby thread, so record the
 * message in crash_context, resume the world, and let the blocked mutator
 * re-raise it (see rb_mmtk_block_for_gc).  The local rb_bug at the end is a
 * backstop in case no mutator ever wakes. */
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 1, 2)
static void
rb_mmtk_gc_thread_bug(const char *msg, ...)
{
    struct objspace *objspace = rb_gc_get_objspace();

    objspace->crash_context.gc_thread_crashed = true;

    va_list args;
    va_start(args, msg);
    vsnprintf(objspace->crash_context.crash_msg, sizeof(objspace->crash_context.crash_msg), msg, args);
    va_end(args);

    fprintf(stderr, "-- GC thread backtrace "
                    "-------------------------------------------\n");
    rb_gc_print_backtrace();
    fprintf(stderr, "\n");

    rb_mmtk_resume_mutators();

    /* Give the mutator time to pick up crash_context and rb_bug properly. */
    sleep(5);

    rb_bug("rb_mmtk_gc_thread_bug");
}
501
/* Rust panic hook for MMTk GC threads: funnel into the GC-thread crash path. */
static void
rb_mmtk_gc_thread_panic_handler(void)
{
    rb_mmtk_gc_thread_bug("MMTk GC thread panicked");
}
507
/* Rust panic hook for mutator threads: rb_bug directly, since a Ruby thread
 * can produce a full crash report itself. */
static void
rb_mmtk_mutator_thread_panic_handler(void)
{
    rb_bug("Ruby mutator thread panicked");
}
513
514// Bootup
/* VM callback table handed to the MMTk core at binding init.
 * Entry order must match the MMTk_RubyUpcalls declaration in gc/mmtk/mmtk.h. */
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_before_updating_jit_code,
    rb_mmtk_after_updating_jit_code,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_move_obj_during_marking,
    rb_mmtk_update_object_references,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_handle_weak_references,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
    rb_mmtk_mutator_thread_panic_handler,
    rb_mmtk_gc_thread_panic_handler,
};
540
541// Use max 80% of the available memory by default for MMTk
542#define RB_MMTK_HEAP_LIMIT_PERC 80
543#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
544#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)
545
/* Whether the MMTk heap grows dynamically or is fixed at a configured size. */
enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};
550
551MMTk_Builder *
552rb_mmtk_builder_init(void)
553{
554 MMTk_Builder *builder = mmtk_builder_default();
555 return builder;
556}
557
558void *
559rb_gc_impl_objspace_alloc(void)
560{
561 MMTk_Builder *builder = rb_mmtk_builder_init();
562 mmtk_init_binding(builder, NULL, &ruby_upcalls);
563
564 return calloc(1, sizeof(struct objspace));
565}
566
567static void gc_run_finalizers(void *data);
568
/* Initialize an objspace freshly allocated by rb_gc_impl_objspace_alloc:
 * finalizer machinery, ractor cache list, and the stop-the-world
 * mutex/condvars. */
void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}
585
/* Release the objspace struct itself.
 * NOTE(review): finalizer_table (an st_table) and any still-queued
 * finalizer_jobs are not freed here — confirm they are drained/released
 * elsewhere during shutdown, otherwise this leaks. */
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}
591
/* Create the per-ractor GC cache and bind an MMTk mutator to it.  The first
 * cache also kicks off MMTk's collector threads.
 * NOTE(review): calloc is unchecked; on OOM the following derefs would crash
 * — confirm whether an explicit rb_bug is preferred here. */
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = calloc(1, sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);
    /* Cached bump-pointer allows rb_mmtk_alloc_fast_path to skip the FFI call. */
    cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);

    return cache;
}
609
/* Tear down a per-ractor GC cache: unlink it, hand any buffered obj_free
 * candidates to MMTk, and destroy the bound mutator. */
void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    mmtk_flush_obj_free_buffer(cache);

    /* Unless we free everything at exit, the main ractor's cache must outlive
     * the others, so at least one more cache should still be registered. */
    if (ruby_free_at_exit_p()) {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 0);
    }
    else {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 1);
    }

    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}
631
/* No runtime-tunable parameters for the MMTk backend yet. */
void rb_gc_impl_set_params(void *objspace_ptr) { (void)objspace_ptr; }
633
634static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
635
636#define MMTK_HEAP_COUNT 6
637#define MMTK_MAX_OBJ_SIZE 640
638
/* Allocation size classes in bytes, ascending, zero-terminated.  Requests are
 * rounded up to the smallest class that fits (see rb_gc_impl_new_obj). */
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 40, 80, 160, 320, MMTK_MAX_OBJ_SIZE, 0
};
642
/* Ruby-level setup: publish GC::INTERNAL_CONSTANTS and compatibility stubs
 * for methods the default GC defines but MMTk does not support. */
void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(MMTK_HEAP_COUNT));
    // TODO: correctly set RVALUE_OLD_AGE when we have generational GC support
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), INT2FIX(0));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    /* Compaction is driven by MMTk itself, not by these APIs. */
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
667
668size_t *
669rb_gc_impl_heap_sizes(void *objspace_ptr)
670{
671 return heap_sizes;
672}
673
674int
675rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
676{
677 struct objspace *objspace = data;
678
679 if (!RB_TYPE_P(obj, T_NONE)) {
680 rb_gc_obj_free_vm_weak_references(obj);
681 rb_gc_obj_free(objspace, obj);
682 }
683
684 return 0;
685}
686
687// Shutdown
688static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
689
/* Shutdown: run obj_free on every remaining object.  GC stays disabled while
 * iterating so the frees cannot trigger a collection mid-walk. */
void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}
697
698// GC
/* Explicit GC request (GC.start).  immediate_mark/immediate_sweep/compact
 * have no separate meaning for MMTk and are ignored. */
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    (void)objspace_ptr;
    (void)immediate_mark;
    (void)immediate_sweep;
    (void)compact;

    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}
704
705bool
706rb_gc_impl_during_gc_p(void *objspace_ptr)
707{
708 struct objspace *objspace = objspace_ptr;
709 return objspace->world_stopped;
710}
711
712static void
713rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
714{
715 rb_gc_prepare_heap_process_object((VALUE)obj);
716}
717
718void
719rb_gc_impl_prepare_heap(void *objspace_ptr)
720{
721 mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
722}
723
724void
725rb_gc_impl_gc_enable(void *objspace_ptr)
726{
727 mmtk_set_gc_enabled(true);
728}
729
/* Disable automatic collection (GC.disable); finish_current_gc is unused. */
void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    (void)objspace_ptr;
    (void)finish_current_gc;
    mmtk_set_gc_enabled(false);
}
735
/* Query whether automatic collection is currently enabled. */
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    (void)objspace_ptr;
    return mmtk_gc_enabled_p();
}
741
742void
743rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
744{
745 struct objspace *objspace = objspace_ptr;
746
747 objspace->gc_stress = RTEST(flag);
748}
749
750VALUE
751rb_gc_impl_stress_get(void *objspace_ptr)
752{
753 struct objspace *objspace = objspace_ptr;
754
755 return objspace->gc_stress ? Qtrue : Qfalse;
756}
757
/* GC.config — report the active MMTk configuration as a Hash.
 * mmtk_heap_min is only meaningful (and only reported) when positive. */
VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}
772
773void
774rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
775{
776 // TODO
777}
778
779// Object allocation
780
781static VALUE
782rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
783{
784 MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
785 if (bump_pointer == NULL) return 0;
786
787 uintptr_t new_cursor = bump_pointer->cursor + size;
788
789 if (new_cursor > bump_pointer->limit) {
790 return 0;
791 }
792 else {
793 VALUE obj = (VALUE)bump_pointer->cursor;
794 bump_pointer->cursor = new_cursor;
795 return obj;
796 }
797}
798
799static bool
800obj_can_parallel_free_p(VALUE obj)
801{
802 switch (RB_BUILTIN_TYPE(obj)) {
803 case T_ARRAY:
804 case T_BIGNUM:
805 case T_COMPLEX:
806 case T_FLOAT:
807 case T_HASH:
808 case T_OBJECT:
809 case T_RATIONAL:
810 case T_REGEXP:
811 case T_STRING:
812 case T_STRUCT:
813 case T_SYMBOL:
814 return true;
815 default:
816 return false;
817 }
818}
819
/* Hand all buffered obj_free candidates from this ractor cache to MMTk and
 * reset both buffers.  Called before each GC and when the cache is freed. */
static void
mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache)
{
    if (cache->obj_free_parallel_count > 0) {
        mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
                                     cache->obj_free_parallel_count, true);
        cache->obj_free_parallel_count = 0;
    }
    if (cache->obj_free_non_parallel_count > 0) {
        mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
                                     cache->obj_free_non_parallel_count, false);
        cache->obj_free_non_parallel_count = 0;
    }
}
834
835static inline void
836mmtk_buffer_obj_free_candidate(struct MMTk_ractor_cache *cache, VALUE obj)
837{
838 if (obj_can_parallel_free_p(obj)) {
839 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
840 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
841 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
842 cache->obj_free_parallel_count, true);
843 cache->obj_free_parallel_count = 0;
844 }
845 }
846 else {
847 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
848 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
849 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
850 cache->obj_free_non_parallel_count, false);
851 cache->obj_free_non_parallel_count = 0;
852 }
853 }
854}
855
/* Allocate a new heap object.
 * Layout: one hidden VALUE-sized prefix word stores the slot size, followed
 * by flags and klass; the returned VALUE points just past the prefix.
 * alloc_size is rounded up to the nearest size class in heap_sizes. */
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
    /* Round up to the smallest size class that fits. */
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    /* Extra word for the hidden slot-size prefix. */
    alloc_size += sizeof(VALUE);

    VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
    if (!alloc_obj) {
        /* Slow path: bump-pointer region exhausted (may trigger a GC). */
        alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    }

    /* Skip the prefix word; [-1] holds the usable slot size. */
    alloc_obj++;
    alloc_obj[-1] = alloc_size - sizeof(VALUE);
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;

    // TODO: implement fast path for mmtk_post_alloc
    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_buffer_obj_free_candidate(ractor_cache, (VALUE)alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
898
899size_t
900rb_gc_impl_obj_slot_size(VALUE obj)
901{
902 return ((VALUE *)obj)[-1];
903}
904
905size_t
906rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
907{
908 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
909 if (size == heap_sizes[i]) return i;
910 if (size < heap_sizes[i]) return i;
911 }
912
913 rb_bug("size too big");
914}
915
916bool
917rb_gc_impl_size_allocatable_p(size_t size)
918{
919 return size <= MMTK_MAX_OBJ_SIZE;
920}
921
922// Malloc
/* Off-heap allocation for VM internals.  objspace_ptr and gc_allowed are
 * currently unused. */
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    (void)objspace_ptr;
    (void)gc_allowed;

    // TODO: don't use system malloc
    void *mem = malloc(size);
    return mem;
}
929
/* Zero-initialized off-heap allocation.  objspace_ptr and gc_allowed are
 * currently unused. */
void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    (void)objspace_ptr;
    (void)gc_allowed;

    // TODO: don't use system calloc
    void *mem = calloc(1, size);
    return mem;
}
936
/* Resize an off-heap allocation.  objspace_ptr, old_size, and gc_allowed are
 * currently unused. */
void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    (void)objspace_ptr;
    (void)old_size;
    (void)gc_allowed;

    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}
943
/* Release an off-heap allocation.  objspace_ptr and old_size are unused. */
void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    (void)objspace_ptr;
    (void)old_size;

    // TODO: don't use system free
    free(ptr);
}
950
/* Memory-usage accounting hook; the MMTk backend does not track this yet. */
void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { (void)objspace_ptr; (void)diff; }
952
953// Marking
954static inline VALUE
955rb_mmtk_call_object_closure(VALUE obj, bool pin)
956{
957 if (RB_UNLIKELY(RB_BUILTIN_TYPE(obj) == T_NONE)) {
958 const size_t info_size = 256;
959 char obj_info_buf[info_size];
960 rb_raw_obj_info(obj_info_buf, info_size, obj);
961
962 char parent_obj_info_buf[info_size];
963 rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);
964
965 rb_mmtk_gc_thread_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
966 }
967
968 return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
969 rb_mmtk_gc_thread_tls->object_closure.rust_closure,
970 rb_mmtk_gc_thread_tls->gc_context,
971 (MMTk_ObjectReference)obj,
972 pin
973 );
974}
975
976void
977rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
978{
979 if (RB_SPECIAL_CONST_P(obj)) return;
980
981 rb_mmtk_call_object_closure(obj, false);
982}
983
984void
985rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
986{
987 if (RB_SPECIAL_CONST_P(*ptr)) return;
988
989 VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
990 if (new_obj != *ptr) {
991 *ptr = new_obj;
992 }
993}
994
995void
996rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
997{
998 if (RB_SPECIAL_CONST_P(obj)) return;
999
1000 rb_mmtk_call_object_closure(obj, true);
1001}
1002
1003void
1004rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
1005{
1006 if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
1007 rb_gc_impl_mark_and_pin(objspace_ptr, obj);
1008 }
1009}
1010
/* Register `obj` as holding weak references so the collector calls
 * rb_mmtk_handle_weak_references for it after marking. */
void
rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
{
    mmtk_declare_weak_references((MMTk_ObjectReference)obj);
}
1017
1018bool
1019rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
1020{
1021 return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
1022}
1023
1024// Compaction
1025void
1026rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
1027{
1028 mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
1029}
1030
1031bool
1032rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
1033{
1034 return rb_mmtk_call_object_closure(obj, false) != obj;
1035}
1036
1037VALUE
1038rb_gc_impl_location(void *objspace_ptr, VALUE obj)
1039{
1040 return rb_mmtk_call_object_closure(obj, false);
1041}
1042
1043// Write barriers
/* Generational write barrier: record that `a` now references `b`.
 * Nothing to record when `b` is an immediate.  Debug builds verify both
 * operands are real heap objects first. */
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    if (SPECIAL_CONST_P(b)) return;

#ifdef MMTK_DEBUG
    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)a)) {
        char buff[256];
        rb_bug("a: %s is not an object", rb_raw_obj_info(buff, 256, a));
    }

    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)b)) {
        char buff[256];
        rb_bug("b: %s is not an object", rb_raw_obj_info(buff, 256, b));
    }
#endif

    MMTK_ASSERT(BUILTIN_TYPE(a) != T_NONE);
    MMTK_ASSERT(BUILTIN_TYPE(b) != T_NONE);

    /* Only the source object `a` needs to be remembered. */
    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}
1068
1069void
1070rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
1071{
1072 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
1073}
1074
1075void
1076rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
1077{
1078 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
1079
1080 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
1081}
1082
1083// Heap walking
1084static void
1085each_objects_i(MMTk_ObjectReference obj, void *d)
1086{
1087 rb_darray(VALUE) *objs = d;
1088
1089 rb_darray_append(objs, (VALUE)obj);
1090}
1091
/* Iterate all heap objects, calling `func` until it returns non-zero.
 * A snapshot is taken into a darray first so `func` may allocate or free
 * while iterating; each entry is re-validated against the heap before the
 * callback in case it died in the meantime. */
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
1111
1113 int (*func)(void *, void *, size_t, void *);
1114 void *data;
1115};
1116
1117static int
1118rb_gc_impl_each_objects_i(VALUE obj, void *d)
1119{
1120 struct rb_gc_impl_each_objects_data *data = d;
1121
1122 size_t slot_size = rb_gc_impl_obj_slot_size(obj);
1123
1124 return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
1125}
1126
1127void
1128rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
1129{
1130 struct rb_gc_impl_each_objects_data each_objects_data = {
1131 .func = func,
1132 .data = data
1133 };
1134
1135 each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
1136}
1137
1139 void (*func)(VALUE, void *);
1140 void *data;
1141};
1142
1143static int
1144rb_gc_impl_each_object_i(VALUE obj, void *d)
1145{
1146 struct rb_gc_impl_each_object_data *data = d;
1147
1148 data->func(obj, data->data);
1149
1150 return 0;
1151}
1152
1153void
1154rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
1155{
1156 struct rb_gc_impl_each_object_data each_object_data = {
1157 .func = func,
1158 .data = data
1159 };
1160
1161 each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
1162}
1163
1164// Finalizers
1165static VALUE
1166gc_run_finalizers_get_final(long i, void *data)
1167{
1168 VALUE table = (VALUE)data;
1169
1170 return RARRAY_AREF(table, i + 1);
1171}
1172
/* Postponed-job body: drain objspace->finalizer_jobs, running dfree
 * callbacks and Ruby finalizers.  Interrupts are deferred so a finalizer
 * cannot be interrupted mid-run. */
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    /* Re-check the head each iteration: running a finalizer can queue more
     * jobs (e.g. via rb_gc_impl_make_zombie). */
    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            /* Element 0 is the object ID; elements 1..N are the finalizers. */
            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),
                RARRAY_LEN(finalizer_array) - 1,
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
1208
/*
 * Queue a deferred dfree(data) call for obj; the job is pushed onto the
 * objspace->finalizer_jobs stack and later executed by gc_run_finalizers.
 */
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    /* No free function means there is nothing to defer. */
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    /* Lock-free stack push: retry until the CAS observes the same head we
     * linked job->next to. */
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    /* NOTE(review): skipped during free-at-exit — presumably because
     * shutdown drains the job list synchronously (see
     * rb_gc_impl_shutdown_call_finalizer); confirm. */
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
1231
1232VALUE
1233rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
1234{
1235 struct objspace *objspace = objspace_ptr;
1236 VALUE table;
1237 st_data_t data;
1238
1239 RBASIC(obj)->flags |= FL_FINALIZE;
1240
1241 int lev = RB_GC_VM_LOCK();
1242
1243 if (st_lookup(objspace->finalizer_table, obj, &data)) {
1244 table = (VALUE)data;
1245
1246 /* avoid duplicate block, table is usually small */
1247 {
1248 long len = RARRAY_LEN(table);
1249 long i;
1250
1251 for (i = 0; i < len; i++) {
1252 VALUE recv = RARRAY_AREF(table, i);
1253 if (rb_equal(recv, block)) {
1254 RB_GC_VM_UNLOCK(lev);
1255 return recv;
1256 }
1257 }
1258 }
1259
1260 rb_ary_push(table, block);
1261 }
1262 else {
1263 table = rb_ary_new3(2, rb_obj_id(obj), block);
1264 rb_obj_hide(table);
1265 st_add_direct(objspace->finalizer_table, obj, table);
1266 }
1267
1268 RB_GC_VM_UNLOCK(lev);
1269
1270 return block;
1271}
1272
1273void
1274rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1275{
1276 struct objspace *objspace = objspace_ptr;
1277
1278 st_data_t data = obj;
1279
1280 int lev = RB_GC_VM_LOCK();
1281 st_delete(objspace->finalizer_table, &data, 0);
1282 RB_GC_VM_UNLOCK(lev);
1283
1284 FL_UNSET(obj, FL_FINALIZE);
1285}
1286
1287void
1288rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
1289{
1290 struct objspace *objspace = objspace_ptr;
1291 VALUE table;
1292 st_data_t data;
1293
1294 if (!FL_TEST(obj, FL_FINALIZE)) return;
1295
1296 int lev = RB_GC_VM_LOCK();
1297 if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
1298 table = rb_ary_dup((VALUE)data);
1299 RARRAY_ASET(table, 0, rb_obj_id(dest));
1300 st_insert(objspace->finalizer_table, dest, table);
1301 FL_SET(dest, FL_FINALIZE);
1302 }
1303 else {
1304 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
1305 }
1306 RB_GC_VM_UNLOCK(lev);
1307}
1308
1309static int
1310move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1311{
1312 struct objspace *objspace = (struct objspace *)arg;
1313
1314 make_final_job(objspace, (VALUE)key, (VALUE)val);
1315
1316 return ST_DELETE;
1317}
1318
/*
 * Run all outstanding finalizers at VM shutdown, then free the remaining
 * registered obj_free candidates for which rb_gc_shutdown_call_finalizer_p
 * returns true.
 */
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    /* Loop until the table is empty — running finalizers can presumably
     * register new finalizers; confirm. */
    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* Push every ractor's buffered obj_free candidates out to MMTk so
         * the list below is complete. */
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                /* Clear all flags so the object reads as freed. */
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    /* Run any jobs queued by rb_gc_obj_free above (via
     * rb_gc_impl_make_zombie). */
    gc_run_finalizers(objspace);
}
1352
1353// Forking
1354
/*
 * Prepare for fork(2): acquire the VM lock (held across the fork and
 * released in rb_gc_impl_after_fork), barrier all Ractors, and notify MMTk
 * via mmtk_before_fork.
 */
void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

  retry:
    objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
    rb_gc_vm_barrier();

    /* At this point, we know that all the Ractors are paused because of the
     * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
     * one or more Ractors could be paused there. However, mmtk_before_fork is
     * not compatible with that because it assumes that the MMTk workers are idle,
     * but the workers are not idle because they are busy working on a GC.
     *
     * This essentially implements a trylock. It will optimistically lock but will
     * release the lock if it detects that any other Ractors are waiting in
     * rb_mmtk_block_for_gc.
     */
    rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
    if (mutator_blocking_count != 0) {
        RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
        goto retry;
    }

    mmtk_before_fork();
}
1382
1383void
1384rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1385{
1386 struct objspace *objspace = objspace_ptr;
1387
1388 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1389
1390 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1391}
1392
1393// Statistics
1394
1395void
1396rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1397{
1398 struct objspace *objspace = objspace_ptr;
1399
1400 objspace->measure_gc_time = RTEST(flag);
1401}
1402
1403bool
1404rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1405{
1406 struct objspace *objspace = objspace_ptr;
1407
1408 return objspace->measure_gc_time;
1409}
1410
1411unsigned long long
1412rb_gc_impl_get_total_time(void *objspace_ptr)
1413{
1414 struct objspace *objspace = objspace_ptr;
1415
1416 return objspace->total_gc_time;
1417}
1418
1419size_t
1420rb_gc_impl_gc_count(void *objspace_ptr)
1421{
1422 struct objspace *objspace = objspace_ptr;
1423
1424 return objspace->gc_count;
1425}
1426
1427VALUE
1428rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
1429{
1430 VALUE hash = Qnil, key = Qnil;
1431
1432 if (SYMBOL_P(hash_or_key)) {
1433 key = hash_or_key;
1434 }
1435 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
1436 hash = hash_or_key;
1437 }
1438 else {
1439 rb_bug("gc_info_decode: non-hash or symbol given");
1440 }
1441
1442#define SET(name, attr) \
1443 if (key == ID2SYM(rb_intern_const(#name))) \
1444 return (attr); \
1445 else if (hash != Qnil) \
1446 rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));
1447
1448 /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
1449 * the :state key and expects a result. This always returns the :none state. */
1450 SET(state, ID2SYM(rb_intern_const("none")));
1451#undef SET
1452
1453 if (!NIL_P(key)) {
1454 // Matched key should return above
1455 return Qundef;
1456 }
1457
1458 return hash;
1459}
1460
/* Statistics keys reported by rb_gc_impl_stat (GC.stat) for this backend. */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_weak_references_count,
    gc_stat_sym_last   /* sentinel: number of keys */
};
1473
/* Interned symbol cache for rb_gc_impl_stat, indexed by enum gc_stat_sym. */
static VALUE gc_stat_symbols[gc_stat_sym_last];

/* Lazily populate gc_stat_symbols; idempotent — guarded by the first slot
 * being zero. */
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
        S(weak_references_count);
    }
}
1492
/*
 * GC.stat: with a Symbol argument return that one statistic (Qundef if the
 * key is unknown); with a Hash argument fill in every statistic and return
 * the hash.
 */
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

/* Either return the single requested statistic or store it into the hash. */
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    /* Divides by 1e6 — assumes total_gc_time is accumulated in nanoseconds
     * and reported in milliseconds; confirm where it is accumulated. */
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
    SET(weak_references_count, mmtk_weak_references_count());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1535
1536VALUE
1537rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1538{
1539 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1540 return hash_or_sym;
1541 }
1542 else {
1543 return Qundef;
1544 }
1545}
1546
1547// Miscellaneous
1548
/* Scratch buffer returned by rb_gc_impl_object_metadata: up to
 * RB_GC_OBJECT_METADATA_ENTRY_COUNT entries plus a zero terminator. */
#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1551
1553rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1554{
1555 static ID ID_object_id;
1556
1557 if (!ID_object_id) {
1558#define I(s) ID_##s = rb_intern(#s);
1559 I(object_id);
1560#undef I
1561 }
1562
1563 size_t n = 0;
1564
1565#define SET_ENTRY(na, v) do { \
1566 MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1567 object_metadata_entries[n].name = ID_##na; \
1568 object_metadata_entries[n].val = v; \
1569 n++; \
1570} while (0)
1571
1572 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1573
1574 object_metadata_entries[n].name = 0;
1575 object_metadata_entries[n].val = 0;
1576
1577 return object_metadata_entries;
1578}
1579
1580bool
1581rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1582{
1583 if (ptr == NULL) return false;
1584 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1585 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1586}
1587
/* Always false: this backend does not expose a dead-but-unreclaimed object
 * state (NOTE(review): presumably unlike the default GC's lazy sweep —
 * confirm). */
bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    return false;
}
1593
1594void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1595
/* Propagate GC-relevant attributes from obj to dest: write-barrier
 * unprotected status and any registered finalizers. */
void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}
1605
1606// GC Identification
1607
/* Identifier of this GC implementation. */
const char *
rb_gc_impl_active_gc_name(void)
{
    static const char name[] = "mmtk";

    return name;
}
Atomic operations.
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
Definition atomic.h:214
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:365
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
Definition atomic.h:223
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1933
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1899
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:430
static void RB_FL_SET(VALUE obj, VALUE flags)
Sets the given flag(s).
Definition fl_type.h:561
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:226
@ RUBY_FL_WEAK_REFERENCE
This object weakly refers to other objects.
Definition fl_type.h:260
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:29
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:125
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:100
VALUE rb_mGC
GC module.
Definition gc.c:429
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:176
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1515
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:855
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5814
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:50
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:46
Ruby object's base components.
Definition rbasic.h:69
Definition gc_impl.h:15
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376