/*
 * mmtk.c — MMTk-based GC implementation for Ruby.
 * (Ruby 4.1.0dev, 2026-04-19, revision f39318083e67c6d1a64d9ea03a5ef944ffb52f76)
 */
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
/* Per-VM GC state fronting the MMTk instance. */
struct objspace {
    bool measure_gc_time;   // accumulate wall-clock GC time into total_gc_time
    bool gc_stress;         // GC.stress: collect before every allocation

    size_t gc_count;                // completed GCs
    size_t moving_gc_count;         // subset of gc_count where objects may have moved
    size_t total_gc_time;           // nanoseconds of stop-the-world time (when measured)
    size_t total_allocated_objects;

    st_table *finalizer_table;                  // obj -> hidden array (element 0 = object ID)
    struct MMTk_final_job *finalizer_jobs;      // stack of pending dfree/finalize jobs
    rb_postponed_job_handle_t finalizer_postponed_job;

    struct ccan_list_head ractor_caches;        // all live per-ractor allocation caches
    unsigned long live_ractor_cache_count;

    /* Stop-the-world handshake between mutators and GC threads. */
    pthread_mutex_t mutex;
    rb_atomic_t mutator_blocking_count;
    bool world_stopped;                         // guarded by mutex
    pthread_cond_t cond_world_stopped;          // signalled once the world is stopped
    pthread_cond_t cond_world_started;          // signalled when mutators may resume
    size_t start_the_world_count;

    /* Crash info written by a GC thread, reported via rb_bug on a mutator. */
    struct {
        bool gc_thread_crashed;
        char crash_msg[256];
    } crash_context;

    struct rb_gc_vm_context vm_context;

    unsigned int fork_hook_vm_lock_lev;
};
51
52#define OBJ_FREE_BUF_CAPACITY 128
53
55 struct ccan_list_node list_node;
56
57 MMTk_Mutator *mutator;
58 bool gc_mutator_p;
59
60 MMTk_BumpPointer *bump_pointer;
61
62 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
63 size_t obj_free_parallel_count;
64 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
65 size_t obj_free_non_parallel_count;
66};
67
69 struct MMTk_final_job *next;
70 enum {
71 MMTK_FINAL_JOB_DFREE,
72 MMTK_FINAL_JOB_FINALIZE,
73 } kind;
74 union {
75 struct {
76 void (*func)(void *);
77 void *data;
78 } dfree;
79 struct {
80 /* HACK: we store the object ID on the 0th element of this array. */
81 VALUE finalizer_array;
82 } finalize;
83 } as;
84};
85
#ifdef RB_THREAD_LOCAL_SPECIFIER
/* GC-worker-thread-local state handed to us by MMTk. */
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;

/* Object currently being traced; used for diagnostics in error paths. */
RB_THREAD_LOCAL_SPECIFIER VALUE marking_parent_object;
#else
# error We currently need language-supported TLS
#endif

/* Assertion compiled in only for MMTK_DEBUG builds. */
#ifdef MMTK_DEBUG
# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#else
# define MMTK_ASSERT(expr, ...) ((void)0)
#endif
99
100#include <pthread.h>
101
102static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
103
/* MMTk upcall: record the worker-thread TLS pointer for this GC thread. */
static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}
109
/* MMTk upcall: true when the calling thread is a Ruby (mutator) thread. */
static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}
115
116static void
117rb_mmtk_stop_the_world(void)
118{
119 struct objspace *objspace = rb_gc_get_objspace();
120
121 int err;
122 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
123 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
124 }
125
126 while (!objspace->world_stopped) {
127 pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
128 }
129
130 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
131 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
132 }
133}
134
135static void
136rb_mmtk_resume_mutators(bool current_gc_may_move)
137{
138 struct objspace *objspace = rb_gc_get_objspace();
139
140 int err;
141 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
142 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
143 }
144
145 objspace->world_stopped = false;
146 objspace->gc_count++;
147 if (current_gc_may_move) objspace->moving_gc_count++;
148 pthread_cond_broadcast(&objspace->cond_world_started);
149
150 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
151 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
152 }
153}
154
155static void mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache);
156
/*
 * MMTk upcall: park the calling mutator for the duration of a GC.  The first
 * mutator to acquire the VM lock with gc_count still unchanged becomes the
 * GC coordinator: it stops the world, hands control to the GC threads, and
 * waits for them to finish.  Every other mutator simply blocks on the VM
 * lock until the coordinator releases it.
 */
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    // Count mutators waiting on the VM lock so the GC side can observe them.
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // If no GC completed while we waited for the VM lock, coordinate one.
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        // Hand all buffered obj_free candidates to MMTk before marking.
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        objspace->world_stopped = true;

        // Release the GC threads blocked in rb_mmtk_stop_the_world.
        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        // A GC worker cannot rb_bug safely; it records a message instead and
        // we report it here on a mutator thread (see rb_mmtk_gc_thread_bug).
        if (RB_UNLIKELY(objspace->crash_context.gc_thread_crashed)) {
            rb_bug("%s", objspace->crash_context.crash_msg);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
220
/* MMTk upcalls: bracket the phase in which the GC rewrites object references
 * embedded in JIT-compiled code. */
static void
rb_mmtk_before_updating_jit_code(void)
{
    rb_gc_before_updating_jit_code();
}

static void
rb_mmtk_after_updating_jit_code(void)
{
    rb_gc_after_updating_jit_code();
}
232
233static size_t
234rb_mmtk_number_of_mutators(void)
235{
236 struct objspace *objspace = rb_gc_get_objspace();
237 return objspace->live_ractor_cache_count;
238}
239
240static void
241rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
242{
243 struct objspace *objspace = rb_gc_get_objspace();
244 struct MMTk_ractor_cache *ractor_cache;
245
246 ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
247 visit_mutator(ractor_cache->mutator, data);
248 }
249}
250
/* MMTk upcall: mark the VM root set. */
static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}
262
/* st_foreach callback: mark-and-pin a finalizer-table value (data = objspace). */
static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}
270
271static void
272rb_mmtk_scan_objspace(void)
273{
274 struct objspace *objspace = rb_gc_get_objspace();
275
276 if (objspace->finalizer_table != NULL) {
277 st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
278 }
279
280 struct MMTk_final_job *job = objspace->finalizer_jobs;
281 while (job != NULL) {
282 switch (job->kind) {
283 case MMTK_FINAL_JOB_DFREE:
284 break;
285 case MMTK_FINAL_JOB_FINALIZE:
286 rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
287 break;
288 default:
289 rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
290 }
291
292 job = job->next;
293 }
294}
295
/* MMTk upcall: notify the VM that an object moved from `from` to `to`. */
static void
rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
{
    rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
}
301
/* MMTk upcall: fix up the outgoing references of an object after moving.
 * Objects flagged RUBY_FL_WEAK_REFERENCE are skipped here; they are handled
 * in rb_mmtk_handle_weak_references instead. */
static void
rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
{
    VALUE object = (VALUE)mmtk_object;

    if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
        // Record the parent for diagnostics while tracing (see
        // rb_mmtk_call_object_closure's T_NONE error path).
        marking_parent_object = object;
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
        marking_parent_object = 0;
    }
}
313
/* MMTk upcall: trace the children of `object`. */
static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    marking_parent_object = (VALUE)object;  // for diagnostics in error paths
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
    marking_parent_object = 0;
}
321
/* MMTk upcall: process an object's weak references; when the GC is a moving
 * one, also update its ordinary references afterwards. */
static void
rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
{
    VALUE object = (VALUE)mmtk_object;

    marking_parent_object = object;  // for diagnostics in error paths

    rb_gc_handle_weak_references(object);

    if (moving) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }

    marking_parent_object = 0;
}
337
/* MMTk upcall: tear down a dead object (fires the FREEOBJ hook first). */
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        // Event hooks need a VM context; this may run on a GC worker thread.
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);

#ifdef MMTK_DEBUG
    // Poison the slot in debug builds so use-after-free fails fast.
    memset((void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
#endif
}
356
/* MMTk upcall: extra live bytes owned by the VM — not tracked yet, so 0. */
static size_t
rb_mmtk_vm_live_bytes(void)
{
    return 0;
}
362
/* Push a FINALIZE job for `table` (the finalizer array of `obj`) onto
 * objspace->finalizer_jobs.  NOTE(review): `obj` itself is unused — the job
 * only needs the array, whose element 0 stores the object ID. */
static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    MMTK_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}
375
/* st_foreach_with_replace callback for the finalizer table: a live key that
 * moved gets its entry replaced; a dead key has its finalizers queued and
 * the entry deleted. */
static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data, int error)
{
    MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    MMTK_ASSERT(RB_BUILTIN_TYPE(value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
        // Key survived; forward it if the GC relocated it.
        VALUE new_key_location = rb_mmtk_call_object_closure((VALUE)key, false);

        MMTK_ASSERT(RB_FL_TEST(new_key_location, RUBY_FL_FINALIZE));

        if (new_key_location != key) {
            return ST_REPLACE;
        }
    }
    else {
        // Key died: schedule its finalizers to run on a Ruby thread.
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}
403
/* Replacement callback: rewrite the key to the object's new address. */
static int
rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
{
    *key = rb_mmtk_call_object_closure((VALUE)*key, false);

    return ST_CONTINUE;
}
411
/* MMTk upcall: sweep the finalizer table — forward moved keys, queue
 * finalizers for dead ones. */
static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    st_foreach_with_replace(
        objspace->finalizer_table,
        rb_mmtk_update_finalizer_table_i,
        rb_mmtk_update_finalizer_table_replace_i,
        (st_data_t)objspace
    );
}
424
/* MMTk upcall: how many VM-global weak tables exist (see
 * rb_mmtk_update_global_tables). */
static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}
430
431static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
432
/* Weak-table callback: delete dead entries, replace moved ones. */
static int
rb_mmtk_update_global_tables_i(VALUE val, void *data)
{
    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    // TODO: check only if in moving GC
    if (rb_mmtk_call_object_closure(val, false) != val) {
        return ST_REPLACE;
    }

    return ST_CONTINUE;
}
447
/* Weak-table replacement callback: rewrite the slot to the new address. */
static int
rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
{
    // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
    *ptr = rb_mmtk_call_object_closure(*ptr, false);

    return ST_CONTINUE;
}
456
/* MMTk upcall: sweep one VM-global weak table (identified by index). */
static void
rb_mmtk_update_global_tables(int table, bool moving)
{
    MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(
        rb_mmtk_update_global_tables_i,
        rb_mmtk_update_global_tables_replace_i,
        NULL,
        !moving,   // non-moving GCs can iterate without the replace pass setup
        (enum rb_gc_vm_weak_tables)table
    );
}
470
471static bool
472rb_mmtk_special_const_p(MMTk_ObjectReference object)
473{
474 VALUE obj = (VALUE)object;
475
476 return RB_SPECIAL_CONST_P(obj);
477}
478
/* Fatal-error path for GC worker threads.  rb_bug needs a Ruby thread, so
 * the message is stashed in crash_context for a mutator to report (see
 * rb_mmtk_block_for_gc), mutators are resumed so one reaches that check,
 * and this thread is parked before aborting as a last resort. */
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 1, 2)
static void
rb_mmtk_gc_thread_bug(const char *msg, ...)
{
    struct objspace *objspace = rb_gc_get_objspace();

    objspace->crash_context.gc_thread_crashed = true;

    va_list args;
    va_start(args, msg);
    vsnprintf(objspace->crash_context.crash_msg, sizeof(objspace->crash_context.crash_msg), msg, args);
    va_end(args);

    fprintf(stderr, "-- GC thread backtrace "
            "-------------------------------------------\n");
    rb_gc_print_backtrace();
    fprintf(stderr, "\n");

    rb_mmtk_resume_mutators(false);

    // Give a resumed mutator time to report crash_msg via rb_bug first.
    sleep(5);

    rb_bug("rb_mmtk_gc_thread_bug");
}
503
/* Panic handler upcall for GC worker threads: funnel into the GC-thread
 * crash path so the error is reported on a mutator. */
static void
rb_mmtk_gc_thread_panic_handler(void)
{
    rb_mmtk_gc_thread_bug("MMTk GC thread panicked");
}

/* Panic handler upcall for mutator threads: rb_bug is safe here. */
static void
rb_mmtk_mutator_thread_panic_handler(void)
{
    rb_bug("Ruby mutator thread panicked");
}
515
// Bootup
/* VM callback table handed to MMTk via mmtk_init_binding.  Entries are
 * positional — the order must match the MMTk_RubyUpcalls declaration. */
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_before_updating_jit_code,
    rb_mmtk_after_updating_jit_code,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_move_obj_during_marking,
    rb_mmtk_update_object_references,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_handle_weak_references,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
    rb_mmtk_mutator_thread_panic_handler,
    rb_mmtk_gc_thread_panic_handler,
};
542
// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

/* Whether the MMTk heap may grow dynamically or is fixed at a given size. */
enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};
552
553MMTk_Builder *
554rb_mmtk_builder_init(void)
555{
556 MMTk_Builder *builder = mmtk_builder_default();
557 return builder;
558}
559
/* Allocate the objspace and initialize the MMTk instance behind it. */
void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls);

    // Zeroed so all counters/flags start at 0/false.
    return calloc(1, sizeof(struct objspace));
}
568
569static void gc_run_finalizers(void *data);
570
/* Second-phase objspace setup: tables, postponed job, lists, and the
 * stop-the-world synchronization primitives. */
void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    // Finalizers run from a postponed job so they execute on a Ruby thread.
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}
587
/* Release the objspace allocated by rb_gc_impl_objspace_alloc. */
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}
593
/* Create and register a per-ractor allocation cache.  The very first cache
 * also starts MMTk's collection machinery. */
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = calloc(1, sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    // Bind an MMTk mutator to this cache and grab its bump allocator.
    cache->mutator = mmtk_bind_mutator(cache);
    cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);

    return cache;
}
611
/* Unregister a ractor cache: flush pending obj_free candidates and destroy
 * its MMTk mutator.  NOTE(review): `cache` itself is not free()d here —
 * presumably intentional (MMTk may still reference it); confirm. */
void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    mmtk_flush_obj_free_buffer(cache);

    // With free-at-exit even the main ractor's cache is freed, so 1 is legal;
    // otherwise the main ractor's cache must remain.
    if (ruby_free_at_exit_p()) {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 0);
    }
    else {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 1);
    }

    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}
633
/* No tunable parameters are supported yet. */
void rb_gc_impl_set_params(void *objspace_ptr) { }

/* GC.verify_internal_consistency is a no-op under MMTk. */
static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
637
/* Size-pool slot sizes in bytes, 0-terminated.  Allocation requests are
 * rounded up to the nearest entry; anything above MMTK_MAX_OBJ_SIZE is
 * rejected (see rb_gc_impl_new_obj). */
#if SIZEOF_VALUE >= 8
#define MMTK_HEAP_COUNT 12
#define MMTK_MAX_OBJ_SIZE 1024
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 40, 64, 80, 96, 128, 160, 256, 512, 640, 768, MMTK_MAX_OBJ_SIZE, 0
};
#else
#define MMTK_HEAP_COUNT 5
#define MMTK_MAX_OBJ_SIZE 512
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 64, 128, 256, MMTK_MAX_OBJ_SIZE, 0
};
#endif
651
652void
653rb_gc_impl_init(void)
654{
655 VALUE gc_constants = rb_hash_new();
656 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(SIZEOF_VALUE >= 8 ? 64 : 32));
657 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
658 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
659 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
660 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(MMTK_HEAP_COUNT));
661 // TODO: correctly set RVALUE_OLD_AGE when we have generational GC support
662 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), INT2FIX(0));
663 OBJ_FREEZE(gc_constants);
664 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
665
666 // no-ops for compatibility
667 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
668
672 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
673 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
674}
675
/* Expose the 0-terminated size-pool table to the GC core. */
size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}
681
682int
683rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
684{
685 struct objspace *objspace = data;
686
687 if (!RB_TYPE_P(obj, T_NONE)) {
688 rb_gc_obj_free_vm_weak_references(obj);
689 rb_gc_obj_free(objspace, obj);
690 }
691
692 return 0;
693}
694
695// Shutdown
696static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
697
/* Shutdown: free every live object.  GC is disabled around the walk so the
 * enumeration snapshot stays valid. */
void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}
705
706// GC
/* GC.start: request a user-triggered collection.  full_mark selects a major
 * GC; immediate_mark/immediate_sweep/compact are currently ignored. */
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}
712
713bool
714rb_gc_impl_during_gc_p(void *objspace_ptr)
715{
716 struct objspace *objspace = objspace_ptr;
717 return objspace->world_stopped;
718}
719
/* mmtk_enumerate_objects callback for heap preparation. */
static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}
725
/* Process::warmup support: run the prepare-heap hook over every object. */
void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}
731
/* GC.enable: allow automatic collections. */
void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

/* GC.disable.  NOTE(review): finish_current_gc is ignored — confirm whether
 * an in-flight GC should be completed here as in the default GC. */
void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

/* Whether automatic GC is currently enabled. */
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}
749
750void
751rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
752{
753 struct objspace *objspace = objspace_ptr;
754
755 objspace->gc_stress = RTEST(flag);
756}
757
758VALUE
759rb_gc_impl_stress_get(void *objspace_ptr)
760{
761 struct objspace *objspace = objspace_ptr;
762
763 return objspace->gc_stress ? Qtrue : Qfalse;
764}
765
/* GC.config: report the active MMTk configuration as a Hash. */
VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    // A heap_min of 0 means "unset"; omit the key in that case.
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}
780
/* GC.config= — not yet supported; settings are silently ignored. */
void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}
786
787// Object allocation
788
789static VALUE
790rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
791{
792 MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
793 if (bump_pointer == NULL) return 0;
794
795 uintptr_t new_cursor = bump_pointer->cursor + size;
796
797 if (new_cursor > bump_pointer->limit) {
798 return 0;
799 }
800 else {
801 VALUE obj = (VALUE)bump_pointer->cursor;
802 bump_pointer->cursor = new_cursor;
803 return obj;
804 }
805}
806
807static bool
808obj_can_parallel_free_p(VALUE obj)
809{
810 switch (RB_BUILTIN_TYPE(obj)) {
811 case T_ARRAY:
812 case T_BIGNUM:
813 case T_COMPLEX:
814 case T_FLOAT:
815 case T_HASH:
816 case T_OBJECT:
817 case T_RATIONAL:
818 case T_REGEXP:
819 case T_STRING:
820 case T_STRUCT:
821 case T_SYMBOL:
822 return true;
823 default:
824 return false;
825 }
826}
827
828static void
829mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache)
830{
831 if (cache->obj_free_parallel_count > 0) {
832 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
833 cache->obj_free_parallel_count, true);
834 cache->obj_free_parallel_count = 0;
835 }
836 if (cache->obj_free_non_parallel_count > 0) {
837 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
838 cache->obj_free_non_parallel_count, false);
839 cache->obj_free_non_parallel_count = 0;
840 }
841}
842
843static inline void
844mmtk_buffer_obj_free_candidate(struct MMTk_ractor_cache *cache, VALUE obj)
845{
846 if (obj_can_parallel_free_p(obj)) {
847 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
848 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
849 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
850 cache->obj_free_parallel_count, true);
851 cache->obj_free_parallel_count = 0;
852 }
853 }
854 else {
855 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
856 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
857 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
858 cache->obj_free_non_parallel_count, false);
859 cache->obj_free_non_parallel_count = 0;
860 }
861 }
862}
863
/*
 * Allocate a new object with the given flags and klass.  The request is
 * rounded up to the nearest size pool, and one extra VALUE is allocated
 * immediately before the object to stash the slot size (read back by
 * rb_gc_impl_obj_slot_size).
 */
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
    // Round up to the smallest size pool that fits.
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    // Extra word for the hidden slot-size prefix.
    alloc_size += sizeof(VALUE);

    VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
    if (!alloc_obj) {
        // Slow path: go through MMTk proper (may trigger a GC).
        alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    }

    alloc_obj++;                                  // object starts after the prefix word
    alloc_obj[-1] = alloc_size - sizeof(VALUE);   // slot size (sans prefix)
    alloc_obj[0] = flags;                         // RBasic flags word
    alloc_obj[1] = klass;                         // RBasic klass word

    // TODO: implement fast path for mmtk_post_alloc
    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_buffer_obj_free_candidate(ractor_cache, (VALUE)alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
906
907size_t
908rb_gc_impl_obj_slot_size(VALUE obj)
909{
910 return ((VALUE *)obj)[-1];
911}
912
913size_t
914rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
915{
916 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
917 if (size == heap_sizes[i]) return i;
918 if (size < heap_sizes[i]) return i;
919 }
920
921 rb_bug("size too big");
922}
923
/* Whether a request of `size` bytes fits in the largest size pool. */
bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size <= MMTK_MAX_OBJ_SIZE;
}
929
930// Malloc
// Malloc
/* xmalloc-family hooks.  MMTk does not yet account for malloc'd memory, so
 * these defer to the system allocator; gc_allowed/old_size/diff are ignored
 * for now. */
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system malloc
    return malloc(size);
}

void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system calloc
    return calloc(1, size);
}

void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}

void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}

void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }
960
961// Marking
/* Trace obj through the current MMTk object closure and return its (possibly
 * new) location; `pin` forbids the GC from moving it.  Tracing a T_NONE slot
 * indicates heap corruption and aborts with diagnostics. */
static inline VALUE
rb_mmtk_call_object_closure(VALUE obj, bool pin)
{
    if (RB_UNLIKELY(RB_BUILTIN_TYPE(obj) == T_NONE)) {
        const size_t info_size = 256;
        char obj_info_buf[info_size];
        rb_raw_obj_info(obj_info_buf, info_size, obj);

        // marking_parent_object identifies who referenced the bad slot.
        char parent_obj_info_buf[info_size];
        rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);

        rb_mmtk_gc_thread_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
    }

    return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
        rb_mmtk_gc_thread_tls->object_closure.rust_closure,
        rb_mmtk_gc_thread_tls->gc_context,
        (MMTk_ObjectReference)obj,
        pin
    );
}
983
/* Mark obj without pinning; immediates are ignored. */
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, false);
}
991
/* Mark *ptr and rewrite it if the object moved.  The store is skipped when
 * the location is unchanged — presumably to avoid dirtying pages
 * unnecessarily (CoW friendliness); confirm before simplifying. */
void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
    if (new_obj != *ptr) {
        *ptr = new_obj;
    }
}
1002
/* Mark obj and pin it so a moving GC cannot relocate it. */
void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, true);
}
1010
/* Conservative mark: obj is only marked if it actually points into the MMTk
 * heap, and is pinned because the originating reference cannot be updated. */
void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}
1018
/* Register obj as holding weak references so MMTk runs the weak-reference
 * pass over it.  NOTE(review): one interior line appears to have been
 * dropped from this span during extraction — compare with the original. */
void
rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
{
    mmtk_declare_weak_references((MMTk_ObjectReference)obj);
}
1025
/* During the weak-reference pass: whether the referent is still alive. */
bool
rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
{
    return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
}
1031
1032// Compaction
/* Permanently pin obj so moving GCs never relocate it. */
void
rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
{
    mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
}
1038
/* Whether obj has been relocated (its current location differs from obj). */
bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false) != obj;
}
1044
/* Current address of obj (identity if it has not moved). */
VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false);
}
1050
1051// Write barriers
/* Post-write barrier for the reference store a.field = b. */
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    // Storing an immediate creates no heap edge.
    if (SPECIAL_CONST_P(b)) return;

#ifdef MMTK_DEBUG
    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)a)) {
        char buff[256];
        rb_bug("a: %s is not an object", rb_raw_obj_info(buff, 256, a));
    }

    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)b)) {
        char buff[256];
        rb_bug("b: %s is not an object", rb_raw_obj_info(buff, 256, b));
    }
#endif

    MMTK_ASSERT(BUILTIN_TYPE(a) != T_NONE);
    MMTK_ASSERT(BUILTIN_TYPE(b) != T_NONE);

    // Only the source object needs to be recorded by the barrier.
    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}
1076
/* Mark obj as not write-barrier protected (shady object). */
void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
}
1082
/* Re-run the post barrier for obj (equivalent of remembering it). */
void
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
}
1090
1091// Heap walking
1092static void
1093each_objects_i(MMTk_ObjectReference obj, void *d)
1094{
1095 rb_darray(VALUE) *objs = d;
1096
1097 rb_darray_append(objs, (VALUE)obj);
1098}
1099
/* Snapshot every heap object into a darray, then invoke func over the
 * snapshot; iteration stops early if func returns non-zero. */
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        // func may free objects; skip slots MMTk no longer considers objects.
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
1119
/* Adapter state for rb_gc_impl_each_objects over each_object.  NOTE: the
 * opening line of this struct was dropped during extraction; restored from
 * its usages. */
struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);
    void *data;
};
1124
/* each_object callback: adapt a single object to the (start, end, slot_size,
 * data) shape rb_gc_impl_each_objects exposes. */
static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}
1134
/* ObjectSpace-style walk: func(start, end, slot_size, data) per object. */
void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}
1145
1147 void (*func)(VALUE, void *);
1148 void *data;
1149};
1150
/* each_object callback: forward to the user's void-returning func; always
 * continues iteration. */
static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}
1160
/* Walk every object, calling func(obj, data) on each. */
void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}
1171
1172// Finalizers
// Finalizers
/* rb_gc_run_obj_finalizer accessor: element i+1 of the finalizer array
 * (element 0 holds the object ID, not a finalizer). */
static VALUE
gc_run_finalizers_get_final(long i, void *data)
{
    VALUE table = (VALUE)data;

    return RARRAY_AREF(table, i + 1);
}
1180
/* Postponed-job handler: drain objspace->finalizer_jobs, running dfree
 * functions and Ruby finalizers on a Ruby thread. */
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    // Defer interrupts so finalizers cannot be interrupted mid-run.
    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),      // object ID
                RARRAY_LEN(finalizer_array) - 1,      // finalizer count
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
1216
/* Queue obj's dfree callback to run later as a finalization job. The
 * actual dfree runs from a postponed job on a Ruby thread rather than at
 * the point of sweeping. No-op when there is nothing to free. */
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    /* Lock-free push onto the job list: retry until the CAS succeeds
     * against the head we linked this job to. */
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    /* When freeing at exit the job queue is drained synchronously by the
     * shutdown path, so no postponed job needs to be scheduled. */
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
1239
/* Implements ObjectSpace.define_finalizer: register block to run when obj
 * is collected. If an equal block is already registered, returns the
 * existing one instead of adding a duplicate; otherwise returns block.
 * finalizer_table maps obj => hidden array [object_id, block, ...]. */
VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    int lev = RB_GC_VM_LOCK();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    /* Already registered: unlock and hand back the
                     * existing equal block. */
                    RB_GC_VM_UNLOCK(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        /* Slot 0 stores the object ID so finalizers can receive it after
         * obj itself is gone; the array is hidden from Ruby code. */
        table = rb_ary_new3(2, rb_obj_id(obj), block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    RB_GC_VM_UNLOCK(lev);

    return block;
}
1280
1281void
1282rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1283{
1284 struct objspace *objspace = objspace_ptr;
1285
1286 st_data_t data = obj;
1287
1288 int lev = RB_GC_VM_LOCK();
1289 st_delete(objspace->finalizer_table, &data, 0);
1290 RB_GC_VM_UNLOCK(lev);
1291
1292 FL_UNSET(obj, FL_FINALIZE);
1293}
1294
/* Copy obj's registered finalizers to dest (used when an object is
 * duplicated). The copied table gets dest's object ID in slot 0. */
void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = rb_ary_dup((VALUE)data);
        /* Slot 0 is the object ID passed to the finalizers; rewrite it
         * for the new object. */
        RARRAY_ASET(table, 0, rb_obj_id(dest));
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        /* FL_FINALIZE without a table entry means the flag and the table
         * are out of sync — a bug, not a user error. */
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
    RB_GC_VM_UNLOCK(lev);
}
1316
1317static int
1318move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1319{
1320 struct objspace *objspace = (struct objspace *)arg;
1321
1322 make_final_job(objspace, (VALUE)key, (VALUE)val);
1323
1324 return ST_DELETE;
1325}
1326
/* Called at VM shutdown: run every registered finalizer, then free every
 * remaining object that still requires explicit cleanup. */
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    /* Running a finalizer can register new finalizers, so loop until the
     * table is observed empty. */
    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* Flush each ractor's buffered obj_free candidates so the global
         * candidate list below is complete. */
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                /* Clear all flags to mark the slot as freed. */
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    /* rb_gc_obj_free above may have queued dfree jobs; drain them. */
    gc_run_finalizers(objspace);
}
1360
1361// Forking
1362
1363void
1364rb_gc_impl_before_fork(void *objspace_ptr)
1365{
1366 struct objspace *objspace = objspace_ptr;
1367
1368 retry:
1369 objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
1370 rb_gc_vm_barrier();
1371
1372 /* At this point, we know that all the Ractors are paused because of the
1373 * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
1374 * one or more Ractors could be paused there. However, mmtk_before_fork is
1375 * not compatible with that because it assumes that the MMTk workers are idle,
1376 * but the workers are not idle because they are busy working on a GC.
1377 *
1378 * This essentially implements a trylock. It will optimistically lock but will
1379 * release the lock if it detects that any other Ractors are waiting in
1380 * rb_mmtk_block_for_gc.
1381 */
1382 rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
1383 if (mutator_blocking_count != 0) {
1384 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1385 goto retry;
1386 }
1387
1388 mmtk_before_fork();
1389}
1390
1391void
1392rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1393{
1394 struct objspace *objspace = objspace_ptr;
1395
1396 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1397
1398 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1399}
1400
1401// Statistics
1402
1403void
1404rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1405{
1406 struct objspace *objspace = objspace_ptr;
1407
1408 objspace->measure_gc_time = RTEST(flag);
1409}
1410
1411bool
1412rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1413{
1414 struct objspace *objspace = objspace_ptr;
1415
1416 return objspace->measure_gc_time;
1417}
1418
1419unsigned long long
1420rb_gc_impl_get_total_time(void *objspace_ptr)
1421{
1422 struct objspace *objspace = objspace_ptr;
1423
1424 return objspace->total_gc_time;
1425}
1426
1427size_t
1428rb_gc_impl_gc_count(void *objspace_ptr)
1429{
1430 struct objspace *objspace = objspace_ptr;
1431
1432 return objspace->gc_count;
1433}
1434
/* Implements GC.latest_gc_info. The MMTk backend does not track the
 * default GC's per-cycle details; only the :state key is supported and it
 * always reports :none. Accepts either a Symbol key (returns the value,
 * or Qundef when unknown) or a Hash (fills in all supported keys). */
VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

/* Either return the value for a matching Symbol key, or record it in the
 * supplied Hash. */
#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1468
/* Keys reported by GC.stat for this implementation. */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_moving_gc_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_weak_references_count,
    gc_stat_sym_last /* sentinel: number of keys */
};

/* Interned symbols for the keys above, filled lazily by setup_gc_stat_symbols. */
static VALUE gc_stat_symbols[gc_stat_sym_last];
1484
1485static void
1486setup_gc_stat_symbols(void)
1487{
1488 if (gc_stat_symbols[0] == 0) {
1489#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
1490 S(count);
1491 S(moving_gc_count);
1492 S(time);
1493 S(total_allocated_objects);
1494 S(total_bytes);
1495 S(used_bytes);
1496 S(free_bytes);
1497 S(starting_heap_address);
1498 S(last_heap_address);
1499 S(weak_references_count);
1500 }
1501}
1502
/* Implements GC.stat: with a Hash argument, fill in every supported
 * statistic; with a Symbol, return the matching value directly (or Qundef
 * for an unknown key). */
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

/* Either return the value for a matching Symbol key, or record it in the
 * supplied Hash. */
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    SET(moving_gc_count, objspace->moving_gc_count);
    /* NOTE(review): divides by 1e6 — presumably ns -> ms; confirm the
     * units in which total_gc_time is accumulated. */
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
    SET(weak_references_count, mmtk_weak_references_count());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1546
1547VALUE
1548rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1549{
1550 if (FIXNUM_P(heap_name) && SYMBOL_P(hash_or_sym)) {
1551 int heap_idx = FIX2INT(heap_name);
1552 if (heap_idx < 0 || heap_idx >= MMTK_HEAP_COUNT) {
1553 rb_raise(rb_eArgError, "size pool index out of range");
1554 }
1555
1556 if (hash_or_sym == ID2SYM(rb_intern("slot_size"))) {
1557 return SIZET2NUM(heap_sizes[heap_idx]);
1558 }
1559
1560 return Qundef;
1561 }
1562
1563 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1564 return hash_or_sym;
1565 }
1566
1567 return Qundef;
1568}
1569
1570// Miscellaneous
1571
/* Scratch buffer for rb_gc_impl_object_metadata: room for at most one real
 * entry (object_id) plus a zeroed terminator entry. */
#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1574
rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
{
    static ID ID_object_id;

    /* Lazily intern the metadata key IDs on first use. */
    if (!ID_object_id) {
#define I(s) ID_##s = rb_intern(#s);
        I(object_id);
#undef I
    }

    size_t n = 0;

/* Append one (name, value) entry, guarding against overflow of the static
 * buffer. */
#define SET_ENTRY(na, v) do { \
    MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
    object_metadata_entries[n].name = ID_##na; \
    object_metadata_entries[n].val = v; \
    n++; \
} while (0)

    if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));

    /* A zero name terminates the list for the caller. */
    object_metadata_entries[n].name = 0;
    object_metadata_entries[n].val = 0;

    return object_metadata_entries;
}
1602
1603bool
1604rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1605{
1606 if (ptr == NULL) return false;
1607 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1608 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1609}
1610
1611bool
1612rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
1613{
1614 return false;
1615}
1616
1617void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1618
1619void
1620rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
1621{
1622 if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
1623 rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
1624 }
1625
1626 rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
1627}
1628
1629// GC Identification
1630
/* Name identifying this GC implementation. */
const char *
rb_gc_impl_active_gc_name(void)
{
    static const char name[] = "mmtk";
    return name;
}
Atomic operations.
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
Definition atomic.h:214
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:365
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
Definition atomic.h:223
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1916
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1882
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:430
static void RB_FL_SET(VALUE obj, VALUE flags)
Sets the given flag(s).
Definition fl_type.h:561
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:226
@ RUBY_FL_WEAK_REFERENCE
This object weakly refers to other objects.
Definition fl_type.h:260
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:125
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:95
VALUE rb_mGC
GC module.
Definition gc.c:429
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:141
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:29
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1515
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:859
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:50
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:46
Ruby object's base components.
Definition rbasic.h:69
Definition gc_impl.h:15
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376