Ruby 4.1.0dev (2026-04-04 revision 3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
mmtk.c
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
/* Per-VM GC state for the MMTk backend. One instance is created by
 * rb_gc_impl_objspace_alloc and shared by all ractors. */
struct objspace {
    bool measure_gc_time;   // when true, stop-the-world pauses are timed into total_gc_time
    bool gc_stress;         // when true, every allocation triggers a collection request

    size_t gc_count;        // collections completed (bumped in rb_mmtk_resume_mutators)
    size_t moving_gc_count; // subset of gc_count in which objects may have moved
    size_t total_gc_time;   // cumulative GC pause time, in nanoseconds
    size_t total_allocated_objects;

    st_table *finalizer_table;                         // obj => hidden finalizer array (see rb_gc_impl_define_finalizer)
    struct MMTk_final_job *finalizer_jobs;             // stack of pending dfree/finalize jobs (CAS-pushed)
    rb_postponed_job_handle_t finalizer_postponed_job; // runs gc_run_finalizers on a mutator thread

    struct ccan_list_head ractor_caches;   // live struct MMTk_ractor_cache list
    unsigned long live_ractor_cache_count;

    pthread_mutex_t mutex;  // guards the stop/start-the-world handshake below
    rb_atomic_t mutator_blocking_count;
    bool world_stopped;
    pthread_cond_t cond_world_stopped; // broadcast by the GC-coordinating mutator once stopped
    pthread_cond_t cond_world_started; // broadcast when mutators may resume
    size_t start_the_world_count;

    struct {
        bool gc_thread_crashed;
        char crash_msg[256]; // captured on a GC thread, re-reported via rb_bug on a mutator
    } crash_context;

    struct rb_gc_vm_context vm_context; // lets GC worker threads run VM code (root scanning etc.)

    unsigned int fork_hook_vm_lock_lev;
};
51
52#define OBJ_FREE_BUF_CAPACITY 128
53
55 struct ccan_list_node list_node;
56
57 MMTk_Mutator *mutator;
58 bool gc_mutator_p;
59
60 MMTk_BumpPointer *bump_pointer;
61
62 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
63 size_t obj_free_parallel_count;
64 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
65 size_t obj_free_non_parallel_count;
66};
67
69 struct MMTk_final_job *next;
70 enum {
71 MMTK_FINAL_JOB_DFREE,
72 MMTK_FINAL_JOB_FINALIZE,
73 } kind;
74 union {
75 struct {
76 void (*func)(void *);
77 void *data;
78 } dfree;
79 struct {
80 /* HACK: we store the object ID on the 0th element of this array. */
81 VALUE finalizer_array;
82 } finalize;
83 } as;
84};
85
86#ifdef RB_THREAD_LOCAL_SPECIFIER
87RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
88
89RB_THREAD_LOCAL_SPECIFIER VALUE marking_parent_object;
90#else
91# error We currently need language-supported TLS
92#endif
93
94#ifdef MMTK_DEBUG
95# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
96#else
97# define MMTK_ASSERT(expr, ...) ((void)0)
98#endif
99
100#include <pthread.h>
101
102static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
103
/* Upcall: record MMTk's per-worker TLS pointer so later upcalls on this GC
 * thread can reach its object closure (see rb_mmtk_call_object_closure). */
static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}

/* Upcall: true when the calling thread is a Ruby (mutator) thread rather
 * than an MMTk GC worker thread. */
static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}
115
/* Upcall: block the calling GC thread until a mutator has stopped the world
 * (world_stopped is set by rb_mmtk_block_for_gc under the same mutex). */
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // Standard condvar wait loop: re-check the predicate to absorb spurious wakeups.
    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
134
/* Upcall: restart the world after a collection. Updates the GC counters and
 * wakes every mutator blocked in rb_mmtk_block_for_gc. */
static void
rb_mmtk_resume_mutators(bool current_gc_may_move)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    if (current_gc_may_move) objspace->moving_gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
154
155static void mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache);
156
/*
 * Upcall: called on a mutator when a GC has been requested. The first
 * mutator to take the VM lock coordinates the collection: it stops the
 * world, wakes the GC threads, and blocks until they finish. If gc_count
 * advanced while waiting for the lock, another mutator already coordinated
 * this collection and we only pass through the lock/unlock.
 */
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    // Advertise that this mutator may sleep on the VM lock.
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        // Capture VM context so GC worker threads can run VM code (root scanning, hooks).
        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        // Hand all buffered obj_free candidates to MMTk before the collection starts.
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        objspace->world_stopped = true;

        // Wake the GC thread waiting in rb_mmtk_stop_the_world.
        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        // A GC-thread failure is deferred to here so it is reported on a mutator thread.
        if (RB_UNLIKELY(objspace->crash_context.gc_thread_crashed)) {
            rb_bug("%s", objspace->crash_context.crash_msg);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
220
/* Upcall: notify the VM before GC rewrites references embedded in JIT code. */
static void
rb_mmtk_before_updating_jit_code(void)
{
    rb_gc_before_updating_jit_code();
}

/* Upcall: notify the VM after JIT-embedded references have been updated. */
static void
rb_mmtk_after_updating_jit_code(void)
{
    rb_gc_after_updating_jit_code();
}
232
/* Upcall: number of live mutators (one ractor cache per mutator). */
static size_t
rb_mmtk_number_of_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();
    return objspace->live_ractor_cache_count;
}
239
240static void
241rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
242{
243 struct objspace *objspace = rb_gc_get_objspace();
244 struct MMTk_ractor_cache *ractor_cache;
245
246 ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
247 visit_mutator(ractor_cache->mutator, data);
248 }
249}
250
/* Upcall: mark the VM root set. Runs on a GC worker thread, so the VM
 * context captured in rb_mmtk_block_for_gc is installed around the call. */
static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}
262
263static int
264pin_value(st_data_t key, st_data_t value, st_data_t data)
265{
266 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
267
268 return ST_CONTINUE;
269}
270
271static void
272rb_mmtk_scan_objspace(void)
273{
274 struct objspace *objspace = rb_gc_get_objspace();
275
276 if (objspace->finalizer_table != NULL) {
277 st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
278 }
279
280 struct MMTk_final_job *job = objspace->finalizer_jobs;
281 while (job != NULL) {
282 switch (job->kind) {
283 case MMTK_FINAL_JOB_DFREE:
284 break;
285 case MMTK_FINAL_JOB_FINALIZE:
286 rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
287 break;
288 default:
289 rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
290 }
291
292 job = job->next;
293 }
294}
295
/* Upcall: notify the VM that an object moved from `from` to `to` during marking. */
static void
rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
{
    rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
}

/*
 * Upcall: rewrite the outgoing references of `mmtk_object` to the new
 * locations of moved objects. Weak-reference holders are skipped here; they
 * are updated later in rb_mmtk_handle_weak_references.
 */
static void
rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
{
    VALUE object = (VALUE)mmtk_object;

    if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
        // Record the parent in TLS so failures in the closure can name it.
        marking_parent_object = object;
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
        marking_parent_object = 0;
    }
}

/*
 * Upcall: mark the children of `object`. marking_parent_object is kept in
 * TLS so rb_mmtk_call_object_closure can report which parent referenced a
 * bad slot.
 */
static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    marking_parent_object = (VALUE)object;
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
    marking_parent_object = 0;
}
321
/*
 * Upcall: process the weak references held by `object`, and — when the
 * current GC may move objects — also fix up its ordinary references
 * (skipped earlier in rb_mmtk_update_object_references for weak holders).
 */
static void
rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
{
    VALUE object = (VALUE)mmtk_object;

    // Parent recorded in TLS for diagnostics in rb_mmtk_call_object_closure.
    marking_parent_object = object;

    rb_gc_handle_weak_references(object);

    if (moving) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }

    marking_parent_object = 0;
}
337
/*
 * Upcall: run obj_free on a dead object. This may execute on a GC worker
 * thread, so the VM context is installed only while the FREEOBJ event hook
 * runs.
 */
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);

#ifdef MMTK_DEBUG
    // Poison the slot so a use-after-free is more likely to crash loudly.
    memset((void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
#endif
}
356
/* Upcall: extra VM-side live bytes to report to MMTk. Not tracked by this
 * binding yet, so always zero. */
static size_t
rb_mmtk_vm_live_bytes(void)
{
    return 0;
}
362
/*
 * Push a FINALIZE job for `table` (the hidden finalizer array whose element
 * 0 is the object ID) onto objspace->finalizer_jobs.
 * NOTE(review): `obj` is currently unused. The plain (non-CAS) list push is
 * used here, unlike rb_gc_impl_make_zombie — presumably safe because this
 * runs during GC table updating; confirm no concurrent pushers.
 */
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    MMTK_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}
375
/*
 * st_foreach_with_replace callback for finalizer_table: when the key object
 * survived, update the entry to its new location if it moved (ST_REPLACE) or
 * keep it (ST_CONTINUE); when it died, queue a finalize job and drop the
 * entry (ST_DELETE).
 */
static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data, int error)
{
    // The finalizer array value is pinned by rb_mmtk_scan_objspace/pin_value.
    MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    MMTK_ASSERT(RB_BUILTIN_TYPE(value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
        VALUE new_key_location = rb_mmtk_call_object_closure((VALUE)key, false);

        MMTK_ASSERT(RB_FL_TEST(new_key_location, RUBY_FL_FINALIZE));

        if (new_key_location != key) {
            return ST_REPLACE;
        }
    }
    else {
        // Object died: schedule its finalizers to run on a mutator thread.
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}
403
404static int
405rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
406{
407 *key = rb_mmtk_call_object_closure((VALUE)*key, false);
408
409 return ST_CONTINUE;
410}
411
/* Upcall: walk finalizer_table after marking, updating moved keys and
 * queueing finalize jobs for dead objects (see the two callbacks above). */
static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    st_foreach_with_replace(
        objspace->finalizer_table,
        rb_mmtk_update_finalizer_table_i,
        rb_mmtk_update_finalizer_table_replace_i,
        (st_data_t)objspace
    );
}

/* Upcall: how many VM-global weak tables rb_mmtk_update_global_tables must visit. */
static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}
430
431static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
432
433static int
434rb_mmtk_update_global_tables_i(VALUE val, void *data)
435{
436 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
437 return ST_DELETE;
438 }
439
440 // TODO: check only if in moving GC
441 if (rb_mmtk_call_object_closure(val, false) != val) {
442 return ST_REPLACE;
443 }
444
445 return ST_CONTINUE;
446}
447
448static int
449rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
450{
451 // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
452 *ptr = rb_mmtk_call_object_closure(*ptr, false);
453
454 return ST_CONTINUE;
455}
456
/* Upcall: update one VM-global weak table after GC. `moving` selects whether
 * entries may need relocation (passed inverted as the "weak only" flag). */
static void
rb_mmtk_update_global_tables(int table, bool moving)
{
    MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(
        rb_mmtk_update_global_tables_i,
        rb_mmtk_update_global_tables_replace_i,
        NULL,
        !moving,
        (enum rb_gc_vm_weak_tables)table
    );
}

/* Upcall: true for immediates (Fixnum, Symbol, nil, ...) that are not heap objects. */
static bool
rb_mmtk_special_const_p(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;

    return RB_SPECIAL_CONST_P(obj);
}
478
/*
 * Report a fatal error detected on a GC worker thread. The message is
 * stashed in crash_context and the mutators are resumed so that
 * rb_mmtk_block_for_gc can re-raise it via rb_bug on a mutator thread
 * (presumably because rb_bug needs mutator context — see the check there).
 * This thread then parks briefly and aborts itself as a fallback.
 */
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 1, 2)
static void
rb_mmtk_gc_thread_bug(const char *msg, ...)
{
    struct objspace *objspace = rb_gc_get_objspace();

    objspace->crash_context.gc_thread_crashed = true;

    va_list args;
    va_start(args, msg);
    vsnprintf(objspace->crash_context.crash_msg, sizeof(objspace->crash_context.crash_msg), msg, args);
    va_end(args);

    fprintf(stderr, "-- GC thread backtrace "
            "-------------------------------------------\n");
    rb_gc_print_backtrace();
    fprintf(stderr, "\n");

    rb_mmtk_resume_mutators(false);

    // Give the mutator a chance to report the stashed message first.
    sleep(5);

    rb_bug("rb_mmtk_gc_thread_bug");
}

/* Panic hook installed for MMTk GC worker threads. */
static void
rb_mmtk_gc_thread_panic_handler(void)
{
    rb_mmtk_gc_thread_bug("MMTk GC thread panicked");
}

/* Panic hook for Ruby mutator threads: rb_bug directly on this thread. */
static void
rb_mmtk_mutator_thread_panic_handler(void)
{
    rb_bug("Ruby mutator thread panicked");
}
515
516// Bootup
/* VM callback table handed to MMTk via mmtk_init_binding. Entry order must
 * match the MMTk_RubyUpcalls struct declaration (positional initializer). */
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_before_updating_jit_code,
    rb_mmtk_after_updating_jit_code,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_move_obj_during_marking,
    rb_mmtk_update_object_references,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_handle_weak_references,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
    rb_mmtk_mutator_thread_panic_handler,
    rb_mmtk_gc_thread_panic_handler,
};
542
543// Use max 80% of the available memory by default for MMTk
544#define RB_MMTK_HEAP_LIMIT_PERC 80
545#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
546#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)
547
548enum mmtk_heap_mode {
549 RB_MMTK_DYNAMIC_HEAP,
550 RB_MMTK_FIXED_HEAP
551};
552
553MMTk_Builder *
554rb_mmtk_builder_init(void)
555{
556 MMTk_Builder *builder = mmtk_builder_default();
557 return builder;
558}
559
/* Allocate the objspace and initialize the MMTk binding with our upcalls.
 * NOTE(review): the calloc result is returned unchecked — confirm callers
 * tolerate NULL on OOM. */
void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls);

    return calloc(1, sizeof(struct objspace));
}
568
569static void gc_run_finalizers(void *data);
570
/* Initialize the zeroed objspace: finalizer machinery, ractor cache list,
 * and the stop/start-the-world synchronization primitives. */
void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    // GC time measurement is on by default for this backend.
    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}
587
/* Release the objspace allocated by rb_gc_impl_objspace_alloc.
 * NOTE(review): the mutex/condvars, finalizer_table, and any pending
 * finalizer jobs are not destroyed here — presumably acceptable because
 * this only runs at process teardown; confirm. */
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}
593
/*
 * Create a per-ractor allocation cache and bind an MMTk mutator to it. The
 * first cache created also starts MMTk's collection machinery.
 * NOTE(review): the calloc result is used unchecked — confirm OOM policy.
 */
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = calloc(1, sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);
    // Cached bump-pointer allocator used by rb_mmtk_alloc_fast_path.
    cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);

    return cache;
}
611
/*
 * Tear down a ractor's allocation cache: unlink it, flush any buffered
 * obj_free candidates, and destroy the MMTk mutator.
 * NOTE(review): `cache` itself is never free()d here — confirm whether that
 * is intentional (e.g. MMTk may still reference it) or a leak.
 */
void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    mmtk_flush_obj_free_buffer(cache);

    // With free-at-exit even the main ractor's cache is released, so the
    // count may legitimately drop to zero; otherwise at least one remains.
    if (ruby_free_at_exit_p()) {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 0);
    }
    else {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 1);
    }

    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}
633
/* No-op: this backend has no tunable parameters to read at boot. */
void rb_gc_impl_set_params(void *objspace_ptr) { }

/* Compatibility stub: consistency verification is not implemented for MMTk. */
static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
637
#define MMTK_HEAP_COUNT 6
#define MMTK_MAX_OBJ_SIZE 640

// Slot size classes, zero-terminated for callers of rb_gc_impl_heap_sizes.
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 40, 80, 160, 320, MMTK_MAX_OBJ_SIZE, 0
};
644
/* Boot-time Ruby-level setup: GC::INTERNAL_CONSTANTS plus compatibility stubs. */
void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
    // Pretend we have MMTK_HEAP_COUNT (6) size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(MMTK_HEAP_COUNT));
    // TODO: correctly set RVALUE_OLD_AGE when we have generational GC support
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), INT2FIX(0));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    // Compaction is driven by MMTk itself; these entry points are not implemented.
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
668
/* Expose the zero-terminated slot size-class table. */
size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}

/* each_object callback used at shutdown: free every live (non-T_NONE) slot,
 * clearing VM-level weak references to it first. */
int
rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
{
    struct objspace *objspace = data;

    if (!RB_TYPE_P(obj, T_NONE)) {
        rb_gc_obj_free_vm_weak_references(obj);
        rb_gc_obj_free(objspace, obj);
    }

    return 0;
}
687
688// Shutdown
689static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
690
/* Shutdown path: run obj_free on every remaining object, with GC disabled
 * around the walk — presumably so a collection cannot run mid-walk. */
void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}
698
699// GC
/* Request a collection on behalf of the current ractor. Only `full_mark` is
 * forwarded; immediate_mark/immediate_sweep/compact have no MMTk analogue here. */
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}

/* True while the world is stopped for a collection.
 * NOTE(review): unsynchronized read of world_stopped — callers presumably
 * tolerate a stale answer; confirm. */
bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;
    return objspace->world_stopped;
}

/* mmtk_enumerate_objects callback for rb_gc_impl_prepare_heap. */
static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}

/* Run the VM's heap-preparation hook over every object (e.g. before fork/CoW). */
void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}
724
/* Re-enable automatic collections (GC.enable). */
void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

/* Disable automatic collections (GC.disable).
 * NOTE(review): finish_current_gc is ignored — a collection already in
 * flight is not waited for; confirm callers accept that. */
void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

/* Whether automatic collections are currently enabled. */
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}

/* GC.stress= : when truthy, every allocation requests a collection
 * (see rb_gc_impl_new_obj). */
void
rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->gc_stress = RTEST(flag);
}

/* GC.stress : current stress flag as a Ruby boolean. */
VALUE
rb_gc_impl_stress_get(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_stress ? Qtrue : Qfalse;
}
758
/* GC.config: report the MMTk plan, worker count, heap mode, and heap bounds. */
VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    // A fixed-size heap has no meaningful minimum; only report when set.
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}

/* GC.config= : no options are settable at runtime yet. */
void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}
779
780// Object allocation
781
782static VALUE
783rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
784{
785 MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
786 if (bump_pointer == NULL) return 0;
787
788 uintptr_t new_cursor = bump_pointer->cursor + size;
789
790 if (new_cursor > bump_pointer->limit) {
791 return 0;
792 }
793 else {
794 VALUE obj = (VALUE)bump_pointer->cursor;
795 bump_pointer->cursor = new_cursor;
796 return obj;
797 }
798}
799
/* Whether `obj` may be freed by parallel GC workers — presumably types whose
 * obj_free does not touch shared VM state; everything else goes through the
 * non-parallel buffer (TODO confirm the criterion against rb_gc_obj_free). */
static bool
obj_can_parallel_free_p(VALUE obj)
{
    switch (RB_BUILTIN_TYPE(obj)) {
      case T_ARRAY:
      case T_BIGNUM:
      case T_COMPLEX:
      case T_FLOAT:
      case T_HASH:
      case T_OBJECT:
      case T_RATIONAL:
      case T_REGEXP:
      case T_STRING:
      case T_STRUCT:
      case T_SYMBOL:
        return true;
      default:
        return false;
    }
}
820
/* Hand both buffered obj_free candidate lists (parallel and non-parallel)
 * to MMTk and reset the counts. Called before GC and on cache teardown. */
static void
mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache)
{
    if (cache->obj_free_parallel_count > 0) {
        mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
                                     cache->obj_free_parallel_count, true);
        cache->obj_free_parallel_count = 0;
    }
    if (cache->obj_free_non_parallel_count > 0) {
        mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
                                     cache->obj_free_non_parallel_count, false);
        cache->obj_free_non_parallel_count = 0;
    }
}
835
836static inline void
837mmtk_buffer_obj_free_candidate(struct MMTk_ractor_cache *cache, VALUE obj)
838{
839 if (obj_can_parallel_free_p(obj)) {
840 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
841 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
842 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
843 cache->obj_free_parallel_count, true);
844 cache->obj_free_parallel_count = 0;
845 }
846 }
847 else {
848 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
849 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
850 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
851 cache->obj_free_non_parallel_count, false);
852 cache->obj_free_non_parallel_count = 0;
853 }
854 }
855}
856
/*
 * Allocate a Ruby object. The payload is preceded by one hidden VALUE
 * storing the slot size (read back by rb_gc_impl_obj_slot_size), so the
 * actual MMTk allocation is alloc_size + sizeof(VALUE).
 */
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
    // Round the request up to the nearest size class.
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    // Room for the hidden size header.
    alloc_size += sizeof(VALUE);

    VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
    if (!alloc_obj) {
        alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    }

    // Skip the header: [-1] = slot size, [0]/[1] = RBasic flags and klass.
    alloc_obj++;
    alloc_obj[-1] = alloc_size - sizeof(VALUE);
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;

    // TODO: implement fast path for mmtk_post_alloc
    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_buffer_obj_free_candidate(ractor_cache, (VALUE)alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
899
/* Slot size of `obj`, read from the hidden VALUE header written by
 * rb_gc_impl_new_obj just before the object body. */
size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return ((VALUE *)obj)[-1];
}
905
906size_t
907rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
908{
909 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
910 if (size == heap_sizes[i]) return i;
911 if (size < heap_sizes[i]) return i;
912 }
913
914 rb_bug("size too big");
915}
916
/* True when `size` fits in the largest size class (see heap_sizes). */
bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size <= MMTK_MAX_OBJ_SIZE;
}
922
// Malloc
// Heap-memory hooks currently forward straight to libc; the gc_allowed and
// old_size bookkeeping parameters are accepted but unused.
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system malloc
    return malloc(size);
}

void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system calloc
    return calloc(1, size);
}

void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}

void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}

/* No-op: external memory pressure is not fed back into MMTk heap sizing yet. */
void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }
953
954// Marking
// Marking
/*
 * Trace `obj` through the current GC worker's object closure (stored in
 * rb_mmtk_gc_thread_tls) and return its possibly-new address — equal to
 * `obj` when it did not move. `pin` asks MMTk not to move the object.
 * Tracing a T_NONE slot is a GC bug; it is reported together with the
 * parent recorded in marking_parent_object.
 */
static inline VALUE
rb_mmtk_call_object_closure(VALUE obj, bool pin)
{
    if (RB_UNLIKELY(RB_BUILTIN_TYPE(obj) == T_NONE)) {
        const size_t info_size = 256;
        char obj_info_buf[info_size];
        rb_raw_obj_info(obj_info_buf, info_size, obj);

        char parent_obj_info_buf[info_size];
        rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);

        rb_mmtk_gc_thread_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
    }

    return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
        rb_mmtk_gc_thread_tls->object_closure.rust_closure,
        rb_mmtk_gc_thread_tls->gc_context,
        (MMTk_ObjectReference)obj,
        pin
    );
}
976
/* Mark `obj` without pinning it (it may move). Immediates are ignored. */
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, false);
}

/* Mark the object referenced by *ptr and rewrite *ptr if the object moved. */
void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
    if (new_obj != *ptr) {
        *ptr = new_obj;
    }
}

/* Mark `obj` and pin it so the collector will not move it. */
void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, true);
}
1003
/* Conservative marking: mark-and-pin `obj` only if it actually points into
 * the MMTk heap (used for values that may or may not be object references). */
void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}
1011
1012void
1013rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
1014{
1016 mmtk_declare_weak_references((MMTk_ObjectReference)obj);
1017}
1018
/* Forwards to mmtk_weak_references_alive_p — presumably reports whether
 * `obj` is still alive for weak-reference purposes; confirm against mmtk.h. */
bool
rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
{
    return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
}
1024
1025// Compaction
/* Register `obj` with MMTk as an object that must never be moved. */
void
rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
{
    mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
}

/* True when the collector relocated `obj` (its closure maps it elsewhere). */
bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false) != obj;
}

/* Current address of `obj`, following any move performed by this GC. */
VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false);
}
1043
1044// Write barriers
/*
 * Write barrier for "a's field was set to b". No barrier is needed for
 * immediates; otherwise MMTk's post-write barrier records `a` on the
 * current mutator's context (only `a` is passed; the field/value is not).
 */
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    if (SPECIAL_CONST_P(b)) return;

#ifdef MMTK_DEBUG
    // Debug builds verify both endpoints really live in the heap.
    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)a)) {
        char buff[256];
        rb_bug("a: %s is not an object", rb_raw_obj_info(buff, 256, a));
    }

    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)b)) {
        char buff[256];
        rb_bug("b: %s is not an object", rb_raw_obj_info(buff, 256, b));
    }
#endif

    MMTK_ASSERT(BUILTIN_TYPE(a) != T_NONE);
    MMTK_ASSERT(BUILTIN_TYPE(b) != T_NONE);

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}
1069
/* Register `obj` with MMTk as not write-barrier protected. */
void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
}

/* Re-run the post barrier on `obj` (e.g. after updating its fields without
 * individual barriers). */
void
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
}
1083
1084// Heap walking
/* mmtk_enumerate_objects callback: append each object to the darray snapshot. */
static void
each_objects_i(MMTk_ObjectReference obj, void *d)
{
    rb_darray(VALUE) *objs = d;

    rb_darray_append(objs, (VALUE)obj);
}

/*
 * Invoke `func` on every object, stopping early when it returns non-zero.
 * Objects are first snapshotted into a darray, and each entry is re-checked
 * with mmtk_is_mmtk_object before the call — presumably so `func` may free
 * objects (as the shutdown path does) without breaking the iteration.
 */
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
1112
1114 int (*func)(void *, void *, size_t, void *);
1115 void *data;
1116};
1117
/* Adapter: turn the (VALUE, data) callback into the public
 * (start, end, slot_size, data) shape expected by rb_gc_impl_each_objects. */
static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    // [obj, obj + slot_size) spans the whole slot.
    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}

/* Public heap walk yielding slot start/end/size; built on each_object. */
void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}
1138
1140 void (*func)(VALUE, void *);
1141 void *data;
1142};
1143
/* Adapter: wrap the void-returning public callback; always continues (returns 0). */
static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}

/* Public heap walk yielding each object VALUE; built on each_object. */
void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}
1164
1165// Finalizers
1166static VALUE
1167gc_run_finalizers_get_final(long i, void *data)
1168{
1169 VALUE table = (VALUE)data;
1170
1171 return RARRAY_AREF(table, i + 1);
1172}
1173
/*
 * Postponed-job callback: drain the finalizer_jobs stack on a mutator
 * thread, running dfree callbacks and Ruby-level finalizers. The
 * pending-interrupt state is set around the loop and restored afterwards.
 */
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        // Pop the head job.
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            // Element 0 is the object ID; finalizers start at index 1.
            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),
                RARRAY_LEN(finalizer_array) - 1,
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
1209
/*
 * Queue a MMTK_FINAL_JOB_DFREE job so that dfree(data) runs during the next
 * finalizer drain. No-op when dfree is NULL. The job is pushed onto the
 * finalizer_jobs list with a lock-free CAS loop, so this is safe to call
 * from multiple threads concurrently.
 */
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    /* Lock-free push: retry until the CAS observes the head we linked to. */
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    /* When freeing at exit, the queue is drained explicitly instead of via
     * the postponed job, so skip the trigger. */
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
1232
1233VALUE
1234rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
1235{
1236 struct objspace *objspace = objspace_ptr;
1237 VALUE table;
1238 st_data_t data;
1239
1240 RBASIC(obj)->flags |= FL_FINALIZE;
1241
1242 int lev = RB_GC_VM_LOCK();
1243
1244 if (st_lookup(objspace->finalizer_table, obj, &data)) {
1245 table = (VALUE)data;
1246
1247 /* avoid duplicate block, table is usually small */
1248 {
1249 long len = RARRAY_LEN(table);
1250 long i;
1251
1252 for (i = 0; i < len; i++) {
1253 VALUE recv = RARRAY_AREF(table, i);
1254 if (rb_equal(recv, block)) {
1255 RB_GC_VM_UNLOCK(lev);
1256 return recv;
1257 }
1258 }
1259 }
1260
1261 rb_ary_push(table, block);
1262 }
1263 else {
1264 table = rb_ary_new3(2, rb_obj_id(obj), block);
1265 rb_obj_hide(table);
1266 st_add_direct(objspace->finalizer_table, obj, table);
1267 }
1268
1269 RB_GC_VM_UNLOCK(lev);
1270
1271 return block;
1272}
1273
1274void
1275rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1276{
1277 struct objspace *objspace = objspace_ptr;
1278
1279 st_data_t data = obj;
1280
1281 int lev = RB_GC_VM_LOCK();
1282 st_delete(objspace->finalizer_table, &data, 0);
1283 RB_GC_VM_UNLOCK(lev);
1284
1285 FL_UNSET(obj, FL_FINALIZE);
1286}
1287
1288void
1289rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
1290{
1291 struct objspace *objspace = objspace_ptr;
1292 VALUE table;
1293 st_data_t data;
1294
1295 if (!FL_TEST(obj, FL_FINALIZE)) return;
1296
1297 int lev = RB_GC_VM_LOCK();
1298 if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
1299 table = rb_ary_dup((VALUE)data);
1300 RARRAY_ASET(table, 0, rb_obj_id(dest));
1301 st_insert(objspace->finalizer_table, dest, table);
1302 FL_SET(dest, FL_FINALIZE);
1303 }
1304 else {
1305 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
1306 }
1307 RB_GC_VM_UNLOCK(lev);
1308}
1309
1310static int
1311move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1312{
1313 struct objspace *objspace = (struct objspace *)arg;
1314
1315 make_final_job(objspace, (VALUE)key, (VALUE)val);
1316
1317 return ST_DELETE;
1318}
1319
/*
 * Run all outstanding finalizers at VM shutdown, then free every object
 * MMTk still has registered as an obj_free candidate.
 *
 * The first loop repeats because running a finalizer can itself register
 * new finalizers.
 */
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    while (objspace->finalizer_table->num_entries) {
        /* Move every table entry into the job queue, then drain it. */
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* Push any per-ractor buffered obj_free candidates to MMTk first so
         * the candidate list below is complete. */
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                /* Clear flags (T_NONE) so the object is not freed twice. */
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    /* rb_gc_obj_free above may have queued dfree jobs; run them now. */
    gc_run_finalizers(objspace);
}
1353
1354// Forking
1355
1356void
1357rb_gc_impl_before_fork(void *objspace_ptr)
1358{
1359 struct objspace *objspace = objspace_ptr;
1360
1361 retry:
1362 objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
1363 rb_gc_vm_barrier();
1364
1365 /* At this point, we know that all the Ractors are paused because of the
1366 * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
1367 * one or more Ractors could be paused there. However, mmtk_before_fork is
1368 * not compatible with that because it assumes that the MMTk workers are idle,
1369 * but the workers are not idle because they are busy working on a GC.
1370 *
1371 * This essentially implements a trylock. It will optimistically lock but will
1372 * release the lock if it detects that any other Ractors are waiting in
1373 * rb_mmtk_block_for_gc.
1374 */
1375 rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
1376 if (mutator_blocking_count != 0) {
1377 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1378 goto retry;
1379 }
1380
1381 mmtk_before_fork();
1382}
1383
/*
 * Counterpart of rb_gc_impl_before_fork: restart MMTk after fork(2) with
 * this process's ractor newobj cache, then release the VM lock taken by
 * the before-fork hook. The pid parameter is unused here.
 */
void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    struct objspace *objspace = objspace_ptr;

    mmtk_after_fork(rb_gc_get_ractor_newobj_cache());

    RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
}
1393
1394// Statistics
1395
1396void
1397rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1398{
1399 struct objspace *objspace = objspace_ptr;
1400
1401 objspace->measure_gc_time = RTEST(flag);
1402}
1403
1404bool
1405rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1406{
1407 struct objspace *objspace = objspace_ptr;
1408
1409 return objspace->measure_gc_time;
1410}
1411
1412unsigned long long
1413rb_gc_impl_get_total_time(void *objspace_ptr)
1414{
1415 struct objspace *objspace = objspace_ptr;
1416
1417 return objspace->total_gc_time;
1418}
1419
1420size_t
1421rb_gc_impl_gc_count(void *objspace_ptr)
1422{
1423 struct objspace *objspace = objspace_ptr;
1424
1425 return objspace->gc_count;
1426}
1427
/*
 * GC.latest_gc_info. Accepts a Symbol (returns the matching value, or
 * Qundef for unknown keys) or a Hash (populated in place and returned).
 * Only the :state key is supported, and it always reports :none.
 */
VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

/* Either return attr for a matching symbol key, or store it in the hash. */
#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1461
/* Keys reported by rb_gc_impl_stat (GC.stat). The order must stay in sync
 * with setup_gc_stat_symbols; gc_stat_sym_last doubles as the count. */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_moving_gc_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_weak_references_count,
    gc_stat_sym_last
};

/* Interned symbol for each gc_stat_sym; filled lazily by setup_gc_stat_symbols. */
static VALUE gc_stat_symbols[gc_stat_sym_last];
1477
1478static void
1479setup_gc_stat_symbols(void)
1480{
1481 if (gc_stat_symbols[0] == 0) {
1482#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
1483 S(count);
1484 S(moving_gc_count);
1485 S(time);
1486 S(total_allocated_objects);
1487 S(total_bytes);
1488 S(used_bytes);
1489 S(free_bytes);
1490 S(starting_heap_address);
1491 S(last_heap_address);
1492 S(weak_references_count);
1493 }
1494}
1495
/*
 * GC.stat. With a Hash argument, fills in every stat and returns the hash;
 * with a Symbol, returns just that stat (Qundef for unknown keys).
 */
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

/* Either return attr for a matching symbol key, or store it in the hash. */
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    SET(moving_gc_count, objspace->moving_gc_count);
    /* total_gc_time is scaled down by 1e6 for the :time entry. */
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
    SET(weak_references_count, mmtk_weak_references_count());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1539
1540VALUE
1541rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1542{
1543 if (FIXNUM_P(heap_name) && SYMBOL_P(hash_or_sym)) {
1544 int heap_idx = FIX2INT(heap_name);
1545 if (heap_idx < 0 || heap_idx >= MMTK_HEAP_COUNT) {
1546 rb_raise(rb_eArgError, "size pool index out of range");
1547 }
1548
1549 if (hash_or_sym == ID2SYM(rb_intern("slot_size"))) {
1550 return SIZET2NUM(heap_sizes[heap_idx]);
1551 }
1552
1553 return Qundef;
1554 }
1555
1556 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1557 return hash_or_sym;
1558 }
1559
1560 return Qundef;
1561}
1562
1563// Miscellaneous
1564
/* Scratch buffer for rb_gc_impl_object_metadata: room for one real entry
 * (object_id) plus a zeroed terminator entry. */
#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1567
1569rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1570{
1571 static ID ID_object_id;
1572
1573 if (!ID_object_id) {
1574#define I(s) ID_##s = rb_intern(#s);
1575 I(object_id);
1576#undef I
1577 }
1578
1579 size_t n = 0;
1580
1581#define SET_ENTRY(na, v) do { \
1582 MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1583 object_metadata_entries[n].name = ID_##na; \
1584 object_metadata_entries[n].val = v; \
1585 n++; \
1586} while (0)
1587
1588 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1589
1590 object_metadata_entries[n].name = 0;
1591 object_metadata_entries[n].val = 0;
1592
1593 return object_metadata_entries;
1594}
1595
1596bool
1597rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1598{
1599 if (ptr == NULL) return false;
1600 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1601 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1602}
1603
1604bool
1605rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
1606{
1607 return false;
1608}
1609
/* Intentionally a no-op in this GC implementation. */
void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1611
1612void
1613rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
1614{
1615 if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
1616 rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
1617 }
1618
1619 rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
1620}
1621
1622// GC Identification
1623
/* Name identifying this GC implementation. */
const char *
rb_gc_impl_active_gc_name(void)
{
    static const char name[] = "mmtk";

    return name;
}
Atomic operations.
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
Definition atomic.h:214
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:365
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
Definition atomic.h:223
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1916
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1882
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:430
static void RB_FL_SET(VALUE obj, VALUE flags)
Sets the given flag(s).
Definition fl_type.h:561
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:226
@ RUBY_FL_WEAK_REFERENCE
This object weakly refers to other objects.
Definition fl_type.h:260
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:125
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:95
VALUE rb_mGC
GC module.
Definition gc.c:429
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:141
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:29
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1515
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:859
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:50
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:46
Ruby object's base components.
Definition rbasic.h:69
Definition gc_impl.h:15
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376