Ruby 4.1.0dev (2026-05-05 revision 6074e53703cf22b837c58f010c99651de7ad442f)
mmtk.c
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
19struct objspace {
20 bool measure_gc_time;
21 bool gc_stress;
22
23 size_t gc_count;
24 size_t moving_gc_count;
25 size_t total_gc_time;
26 size_t total_allocated_objects;
27
28 st_table *finalizer_table;
29 struct MMTk_final_job *finalizer_jobs;
30 rb_postponed_job_handle_t finalizer_postponed_job;
31
32 struct ccan_list_head ractor_caches;
33 unsigned long live_ractor_cache_count;
34
35 pthread_mutex_t mutex;
36 rb_atomic_t mutator_blocking_count;
37 bool world_stopped;
38 pthread_cond_t cond_world_stopped;
39 pthread_cond_t cond_world_started;
40 size_t start_the_world_count;
41
42 struct {
43 bool gc_thread_crashed;
44 char crash_msg[256];
45 } crash_context;
46
47 struct rb_gc_vm_context vm_context;
48
49 unsigned int fork_hook_vm_lock_lev;
50};
51
52#define OBJ_FREE_BUF_CAPACITY 128
53
54struct MMTk_ractor_cache {
55 struct ccan_list_node list_node;
56
57 MMTk_Mutator *mutator;
58 bool gc_mutator_p;
59
60 MMTk_BumpPointer *bump_pointer;
61
62 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
63 size_t obj_free_parallel_count;
64 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
65 size_t obj_free_non_parallel_count;
66};
67
68struct MMTk_final_job {
69 struct MMTk_final_job *next;
70 enum {
71 MMTK_FINAL_JOB_DFREE,
72 MMTK_FINAL_JOB_FINALIZE,
73 } kind;
74 union {
75 struct {
76 void (*func)(void *);
77 void *data;
78 } dfree;
79 struct {
80 /* HACK: we store the object ID on the 0th element of this array. */
81 VALUE finalizer_array;
82 } finalize;
83 } as;
84};
85
86#ifdef RB_THREAD_LOCAL_SPECIFIER
87RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
88
89RB_THREAD_LOCAL_SPECIFIER VALUE marking_parent_object;
90#else
91# error We currently need language-supported TLS
92#endif
93
94#ifdef MMTK_DEBUG
95# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
96#else
97# define MMTK_ASSERT(expr, ...) ((void)0)
98#endif
99
100#include <pthread.h>
101
102static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
103
104static void
105rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
106{
107 rb_mmtk_gc_thread_tls = gc_thread_tls;
108}
109
110static bool
111rb_mmtk_is_mutator(void)
112{
113 return ruby_native_thread_p();
114}
115
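// Upcall from MMTk GC threads: park until the GC mutator (see rb_mmtk_block_for_gc below)
// has run rb_gc_vm_barrier(), flipped objspace->world_stopped, and signalled
// cond_world_stopped, so the collector only proceeds once every Ruby mutator is paused.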
116static void
117rb_mmtk_stop_the_world(void)
118{
119 struct objspace *objspace = rb_gc_get_objspace();
120
121 int err;
122 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
123 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
124 }
125
126 while (!objspace->world_stopped) {
127 pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
128 }
129
130 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
131 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
132 }
133}
134
135static void
136rb_mmtk_resume_mutators(bool current_gc_may_move)
137{
138 struct objspace *objspace = rb_gc_get_objspace();
139
140 int err;
141 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
142 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
143 }
144
145 objspace->world_stopped = false;
146 objspace->gc_count++;
147 if (current_gc_may_move) objspace->moving_gc_count++;
148 pthread_cond_broadcast(&objspace->cond_world_started);
149
150 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
151 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
152 }
153}
154
155static void mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache);
156
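// Upcall invoked on a mutator thread when MMTk wants to collect. The first mutator to get
// the VM lock while gc_count is still unchanged becomes the "GC mutator": it fires
// RUBY_INTERNAL_EVENT_GC_START, flushes every ractor's obj_free buffers, sets world_stopped
// and signals cond_world_stopped for the waiting GC threads, then sleeps on
// cond_world_started until rb_mmtk_resume_mutators wakes it (re-raising any GC-thread crash).
// The mutator_blocking_count increment around RB_GC_VM_LOCK lets rb_gc_impl_before_fork
// detect mutators waiting here.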
157static void
158rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
159{
160 struct objspace *objspace = rb_gc_get_objspace();
161
162 size_t starting_gc_count = objspace->gc_count;
163 RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
164 int lock_lev = RB_GC_VM_LOCK();
165 RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
166 int err;
167 if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
168 rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
169 }
170
171 if (objspace->gc_count == starting_gc_count) {
172 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);
173
174 rb_gc_initialize_vm_context(&objspace->vm_context);
175
176 mutator->gc_mutator_p = true;
177
178 struct timespec gc_start_time;
179 if (objspace->measure_gc_time) {
180 clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
181 }
182
183 rb_gc_save_machine_context();
184
185 rb_gc_vm_barrier();
186
187 struct MMTk_ractor_cache *rc;
188 ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
189 mmtk_flush_obj_free_buffer(rc);
190 }
191
192 objspace->world_stopped = true;
193
194 pthread_cond_broadcast(&objspace->cond_world_stopped);
195
196 // Wait for GC end
197 while (objspace->world_stopped) {
198 pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
199 }
200
201 if (RB_UNLIKELY(objspace->crash_context.gc_thread_crashed)) {
202 rb_bug("%s", objspace->crash_context.crash_msg);
203 }
204
205 if (objspace->measure_gc_time) {
206 struct timespec gc_end_time;
207 clock_gettime(CLOCK_MONOTONIC, &gc_end_time);
208
209 objspace->total_gc_time +=
210 (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
211 (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
212 }
213 }
214
215 if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
216 rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
217 }
218 RB_GC_VM_UNLOCK(lock_lev);
219}
220
221static void
222rb_mmtk_before_updating_jit_code(void)
223{
224 rb_gc_before_updating_jit_code();
225}
226
227static void
228rb_mmtk_after_updating_jit_code(void)
229{
230 rb_gc_after_updating_jit_code();
231}
232
233static size_t
234rb_mmtk_number_of_mutators(void)
235{
236 struct objspace *objspace = rb_gc_get_objspace();
237 return objspace->live_ractor_cache_count;
238}
239
240static void
241rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
242{
243 struct objspace *objspace = rb_gc_get_objspace();
244 struct MMTk_ractor_cache *ractor_cache;
245
246 ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
247 visit_mutator(ractor_cache->mutator, data);
248 }
249}
250
251static void
252rb_mmtk_scan_gc_roots(void)
253{
254 struct objspace *objspace = rb_gc_get_objspace();
255
256 rb_gc_mark_roots(objspace, NULL);
257}
258
259static int
260pin_value(st_data_t key, st_data_t value, st_data_t data)
261{
262 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
263
264 return ST_CONTINUE;
265}
266
267static void
268rb_mmtk_scan_objspace(void)
269{
270 struct objspace *objspace = rb_gc_get_objspace();
271
272 if (objspace->finalizer_table != NULL) {
273 st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
274 }
275
276 struct MMTk_final_job *job = objspace->finalizer_jobs;
277 while (job != NULL) {
278 switch (job->kind) {
279 case MMTK_FINAL_JOB_DFREE:
280 break;
281 case MMTK_FINAL_JOB_FINALIZE:
282 rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
283 break;
284 default:
285 rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
286 }
287
288 job = job->next;
289 }
290}
291
292static void
293rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
294{
295 rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
296}
297
298static void
299rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
300{
301 VALUE object = (VALUE)mmtk_object;
302
303 if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
304 marking_parent_object = object;
305 rb_gc_update_object_references(rb_gc_get_objspace(), object);
306 marking_parent_object = 0;
307 }
308}
309
310static void
311rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
312{
313 marking_parent_object = (VALUE)object;
314 rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
315 marking_parent_object = 0;
316}
317
318static void
319rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
320{
321 VALUE object = (VALUE)mmtk_object;
322
323 marking_parent_object = object;
324
325 rb_gc_handle_weak_references(object);
326
327 if (moving) {
328 rb_gc_update_object_references(rb_gc_get_objspace(), object);
329 }
330
331 marking_parent_object = 0;
332}
333
334static void
335rb_mmtk_call_obj_free(MMTk_ObjectReference object)
336{
337 VALUE obj = (VALUE)object;
338 struct objspace *objspace = rb_gc_get_objspace();
339
340 if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
341 rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
342 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
343 rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
344 }
345
346 rb_gc_obj_free(objspace, obj);
347
348#ifdef MMTK_DEBUG
349 memset((void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
350#endif
351}
352
353static size_t
354rb_mmtk_vm_live_bytes(void)
355{
356 return 0;
357}
358
359static void
360make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
361{
362 MMTK_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);
363
364 struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
365 job->next = objspace->finalizer_jobs;
366 job->kind = MMTK_FINAL_JOB_FINALIZE;
367 job->as.finalize.finalizer_array = table;
368
369 objspace->finalizer_jobs = job;
370}
371
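// Post-GC pass over finalizer_table: keys that survived are forwarded to their possibly
// moved location (ST_REPLACE via the _replace_i callback below); keys that died get a
// MMTK_FINAL_JOB_FINALIZE job queued and are removed from the table (ST_DELETE).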
372static int
373rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data, int error)
374{
375 MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
376 MMTK_ASSERT(RB_BUILTIN_TYPE(value) == T_ARRAY);
377
378 struct objspace *objspace = (struct objspace *)data;
379
380 if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
381 VALUE new_key_location = rb_mmtk_call_object_closure((VALUE)key, false);
382
383 MMTK_ASSERT(RB_FL_TEST(new_key_location, RUBY_FL_FINALIZE));
384
385 if (new_key_location != key) {
386 return ST_REPLACE;
387 }
388 }
389 else {
390 make_final_job(objspace, (VALUE)key, (VALUE)value);
391
392 rb_postponed_job_trigger(objspace->finalizer_postponed_job);
393
394 return ST_DELETE;
395 }
396
397 return ST_CONTINUE;
398}
399
400static int
401rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
402{
403 *key = rb_mmtk_call_object_closure((VALUE)*key, false);
404
405 return ST_CONTINUE;
406}
407
408static void
409rb_mmtk_update_finalizer_table(void)
410{
411 struct objspace *objspace = rb_gc_get_objspace();
412
413 st_foreach_with_replace(
414 objspace->finalizer_table,
415 rb_mmtk_update_finalizer_table_i,
416 rb_mmtk_update_finalizer_table_replace_i,
417 (st_data_t)objspace
418 );
419}
420
421static int
422rb_mmtk_global_tables_count(void)
423{
424 return RB_GC_VM_WEAK_TABLE_COUNT;
425}
426
427static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
428
429static int
430rb_mmtk_update_global_tables_i(VALUE val, void *data)
431{
432 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
433 return ST_DELETE;
434 }
435
436 // TODO: check only if in moving GC
437 if (rb_mmtk_call_object_closure(val, false) != val) {
438 return ST_REPLACE;
439 }
440
441 return ST_CONTINUE;
442}
443
444static int
445rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
446{
447 // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
448 *ptr = rb_mmtk_call_object_closure(*ptr, false);
449
450 return ST_CONTINUE;
451}
452
453static void
454rb_mmtk_update_global_tables(int table, bool moving)
455{
456 MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);
457
458 rb_gc_vm_weak_table_foreach(
459 rb_mmtk_update_global_tables_i,
460 rb_mmtk_update_global_tables_replace_i,
461 NULL,
462 !moving,
463 (enum rb_gc_vm_weak_tables)table
464 );
465}
466
467static bool
468rb_mmtk_special_const_p(MMTk_ObjectReference object)
469{
470 VALUE obj = (VALUE)object;
471
472 return RB_SPECIAL_CONST_P(obj);
473}
474
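// Crash reporting for MMTk GC threads: stash the formatted message in
// objspace->crash_context, print this thread's backtrace, and resume the mutators so the
// blocked GC mutator in rb_mmtk_block_for_gc re-raises the message via rb_bug; after a
// grace period this thread calls rb_bug itself as a fallback.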
475RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 1, 2)
476static void
477rb_mmtk_gc_thread_bug(const char *msg, ...)
478{
479 struct objspace *objspace = rb_gc_get_objspace();
480
481 objspace->crash_context.gc_thread_crashed = true;
482
483 va_list args;
484 va_start(args, msg);
485 vsnprintf(objspace->crash_context.crash_msg, sizeof(objspace->crash_context.crash_msg), msg, args);
486 va_end(args);
487
488 fprintf(stderr, "-- GC thread backtrace "
489 "-------------------------------------------\n");
490 rb_gc_print_backtrace();
491 fprintf(stderr, "\n");
492
493 rb_mmtk_resume_mutators(false);
494
495 sleep(5);
496
497 rb_bug("rb_mmtk_gc_thread_bug");
498}
499
500static void
501rb_mmtk_gc_thread_panic_handler(void)
502{
503 rb_mmtk_gc_thread_bug("MMTk GC thread panicked");
504}
505
506static void
507rb_mmtk_mutator_thread_panic_handler(void)
508{
509 rb_bug("Ruby mutator thread panicked");
510}
511
512// Bootup
513MMTk_RubyUpcalls ruby_upcalls = {
514 rb_mmtk_init_gc_worker_thread,
515 rb_mmtk_is_mutator,
516 rb_mmtk_stop_the_world,
517 rb_mmtk_resume_mutators,
518 rb_mmtk_block_for_gc,
519 rb_mmtk_before_updating_jit_code,
520 rb_mmtk_after_updating_jit_code,
521 rb_mmtk_number_of_mutators,
522 rb_mmtk_get_mutators,
523 rb_mmtk_scan_gc_roots,
524 rb_mmtk_scan_objspace,
525 rb_mmtk_move_obj_during_marking,
526 rb_mmtk_update_object_references,
527 rb_mmtk_call_gc_mark_children,
528 rb_mmtk_handle_weak_references,
529 rb_mmtk_call_obj_free,
530 rb_mmtk_vm_live_bytes,
531 rb_mmtk_update_global_tables,
532 rb_mmtk_global_tables_count,
533 rb_mmtk_update_finalizer_table,
534 rb_mmtk_special_const_p,
535 rb_mmtk_mutator_thread_panic_handler,
536 rb_mmtk_gc_thread_panic_handler,
537};
538
539// By default, cap the MMTk heap at 80% of the system's physical memory
540#define RB_MMTK_HEAP_LIMIT_PERC 80
541#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
542#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)
543
544enum mmtk_heap_mode {
545 RB_MMTK_DYNAMIC_HEAP,
546 RB_MMTK_FIXED_HEAP
547};
548
549MMTk_Builder *
550rb_mmtk_builder_init(void)
551{
552 MMTk_Builder *builder = mmtk_builder_default();
553 return builder;
554}
555
556void *
557rb_gc_impl_objspace_alloc(void)
558{
559 MMTk_Builder *builder = rb_mmtk_builder_init();
560 mmtk_init_binding(builder, NULL, &ruby_upcalls);
561
562 return calloc(1, sizeof(struct objspace));
563}
564
565static void gc_run_finalizers(void *data);
566
567void
568rb_gc_impl_objspace_init(void *objspace_ptr)
569{
570 struct objspace *objspace = objspace_ptr;
571
572 objspace->measure_gc_time = true;
573
574 objspace->finalizer_table = st_init_numtable();
575 objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);
576
577 ccan_list_head_init(&objspace->ractor_caches);
578
579 objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
580 objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
581 objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
582}
583
584void
585rb_gc_impl_objspace_free(void *objspace_ptr)
586{
587 free(objspace_ptr);
588}
589
590void *
591rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
592{
593 struct objspace *objspace = objspace_ptr;
594 if (objspace->live_ractor_cache_count == 0) {
595 mmtk_initialize_collection(ractor);
596 }
597 objspace->live_ractor_cache_count++;
598
599 struct MMTk_ractor_cache *cache = calloc(1, sizeof(struct MMTk_ractor_cache));
600 ccan_list_add(&objspace->ractor_caches, &cache->list_node);
601
602 cache->mutator = mmtk_bind_mutator(cache);
603 cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);
604
605 return cache;
606}
607
608void
609rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
610{
611 struct objspace *objspace = objspace_ptr;
612 struct MMTk_ractor_cache *cache = cache_ptr;
613
614 ccan_list_del(&cache->list_node);
615
616 mmtk_flush_obj_free_buffer(cache);
617
618 if (ruby_free_at_exit_p()) {
619 MMTK_ASSERT(objspace->live_ractor_cache_count > 0);
620 }
621 else {
622 MMTK_ASSERT(objspace->live_ractor_cache_count > 1);
623 }
624
625 objspace->live_ractor_cache_count--;
626
627 mmtk_destroy_mutator(cache->mutator);
628}
629
630void rb_gc_impl_set_params(void *objspace_ptr) { }
631
632static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
633
634#if SIZEOF_VALUE >= 8
635#define MMTK_HEAP_COUNT 12
636#define MMTK_MAX_OBJ_SIZE 1024
637static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
638 32, 40, 64, 80, 96, 128, 160, 256, 512, 640, 768, MMTK_MAX_OBJ_SIZE, 0
639};
640#else
641#define MMTK_HEAP_COUNT 5
642#define MMTK_MAX_OBJ_SIZE 512
643static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
644 32, 64, 128, 256, MMTK_MAX_OBJ_SIZE, 0
645};
646#endif
647
648void
649rb_gc_impl_init(void)
650{
651 VALUE gc_constants = rb_hash_new();
652 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(SIZEOF_VALUE >= 8 ? 64 : 32));
653 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
654 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
655 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
656 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(MMTK_HEAP_COUNT));
657 // TODO: correctly set RVALUE_OLD_AGE when we have generational GC support
658 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), INT2FIX(0));
659 OBJ_FREEZE(gc_constants);
660 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
661
662 // no-ops for compatibility
663 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
664
668 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
669 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
670}
671
672size_t *
673rb_gc_impl_heap_sizes(void *objspace_ptr)
674{
675 return heap_sizes;
676}
677
678int
679rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
680{
681 struct objspace *objspace = data;
682
683 if (!RB_TYPE_P(obj, T_NONE)) {
684 rb_gc_obj_free_vm_weak_references(obj);
685 rb_gc_obj_free(objspace, obj);
686 }
687
688 return 0;
689}
690
691// Shutdown
692static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
693
694void
695rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
696{
697 mmtk_set_gc_enabled(false);
698 each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
699 mmtk_set_gc_enabled(true);
700}
701
702// GC
703void
704rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
705{
706 mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
707}
708
709bool
710rb_gc_impl_during_gc_p(void *objspace_ptr)
711{
712 struct objspace *objspace = objspace_ptr;
713 return objspace->world_stopped;
714}
715
716static void
717rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
718{
719 rb_gc_prepare_heap_process_object((VALUE)obj);
720}
721
722void
723rb_gc_impl_prepare_heap(void *objspace_ptr)
724{
725 mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
726}
727
728void
729rb_gc_impl_gc_enable(void *objspace_ptr)
730{
731 mmtk_set_gc_enabled(true);
732}
733
734void
735rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
736{
737 mmtk_set_gc_enabled(false);
738}
739
740bool
741rb_gc_impl_gc_enabled_p(void *objspace_ptr)
742{
743 return mmtk_gc_enabled_p();
744}
745
746void
747rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
748{
749 struct objspace *objspace = objspace_ptr;
750
751 objspace->gc_stress = RTEST(flag);
752}
753
754VALUE
755rb_gc_impl_stress_get(void *objspace_ptr)
756{
757 struct objspace *objspace = objspace_ptr;
758
759 return objspace->gc_stress ? Qtrue : Qfalse;
760}
761
762VALUE
763rb_gc_impl_config_get(void *objspace_ptr)
764{
765 VALUE hash = rb_hash_new();
766
767 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
768 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
769 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
770 size_t heap_min = mmtk_heap_min();
771 if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
772 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));
773
774 return hash;
775}
776
777void
778rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
779{
780 // TODO
781}
782
783struct rb_gc_vm_context *
784rb_gc_impl_get_vm_context(void *objspace_ptr)
785{
786 struct objspace *objspace = objspace_ptr;
787
788 return &objspace->vm_context;
789}
790
791// Object allocation
792
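// Bump-pointer fast path: carve the object directly out of the ractor-local MMTk bump
// allocator. Returns 0 when no bump pointer is available or the cursor would overrun the
// limit, in which case rb_gc_impl_new_obj falls back to the slow-path mmtk_alloc().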
793static VALUE
794rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
795{
796 MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
797 if (bump_pointer == NULL) return 0;
798
799 uintptr_t new_cursor = bump_pointer->cursor + size;
800
801 if (new_cursor > bump_pointer->limit) {
802 return 0;
803 }
804 else {
805 VALUE obj = (VALUE)bump_pointer->cursor;
806 bump_pointer->cursor = new_cursor;
807 return obj;
808 }
809}
810
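// obj_free candidates are buffered per ractor in two batches of OBJ_FREE_BUF_CAPACITY:
// the builtin types listed below are flagged as safe for parallel freeing, everything
// else is handed to MMTk with the parallel flag cleared. Buffers are flushed when full
// and at every GC (see mmtk_flush_obj_free_buffer).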
811static bool
812obj_can_parallel_free_p(VALUE obj)
813{
814 switch (RB_BUILTIN_TYPE(obj)) {
815 case T_ARRAY:
816 case T_BIGNUM:
817 case T_COMPLEX:
818 case T_FLOAT:
819 case T_HASH:
820 case T_OBJECT:
821 case T_RATIONAL:
822 case T_REGEXP:
823 case T_STRING:
824 case T_STRUCT:
825 case T_SYMBOL:
826 return true;
827 default:
828 return false;
829 }
830}
831
832static void
833mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache)
834{
835 if (cache->obj_free_parallel_count > 0) {
836 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
837 cache->obj_free_parallel_count, true);
838 cache->obj_free_parallel_count = 0;
839 }
840 if (cache->obj_free_non_parallel_count > 0) {
841 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
842 cache->obj_free_non_parallel_count, false);
843 cache->obj_free_non_parallel_count = 0;
844 }
845}
846
847static inline void
848mmtk_buffer_obj_free_candidate(struct MMTk_ractor_cache *cache, VALUE obj)
849{
850 if (obj_can_parallel_free_p(obj)) {
851 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
852 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
853 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
854 cache->obj_free_parallel_count, true);
855 cache->obj_free_parallel_count = 0;
856 }
857 }
858 else {
859 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
860 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
861 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
862 cache->obj_free_non_parallel_count, false);
863 cache->obj_free_non_parallel_count = 0;
864 }
865 }
866}
867
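// Allocation: round the request up to the next size class, reserve one hidden VALUE in
// front of the object to record its slot size (read back by rb_gc_impl_obj_slot_size via
// ((VALUE *)obj)[-1]), initialize flags and klass, run mmtk_post_alloc, and register the
// new object as an obj_free candidate. gc_stress forces a collection request first.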
868VALUE
869rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
870{
871#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
872 struct objspace *objspace = objspace_ptr;
873 struct MMTk_ractor_cache *ractor_cache = cache_ptr;
874
875 if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
876 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
877 if (alloc_size == heap_sizes[i]) break;
878 if (alloc_size < heap_sizes[i]) {
879 alloc_size = heap_sizes[i];
880 break;
881 }
882 }
883
884 if (objspace->gc_stress) {
885 mmtk_handle_user_collection_request(ractor_cache, false, false);
886 }
887
888 alloc_size += sizeof(VALUE);
889
890 VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
891 if (!alloc_obj) {
892 alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
893 }
894
895 alloc_obj++;
896 alloc_obj[-1] = alloc_size - sizeof(VALUE);
897 alloc_obj[0] = flags;
898 alloc_obj[1] = klass;
899
900 // TODO: implement fast path for mmtk_post_alloc
901 mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
902
903 // TODO: only add when object needs obj_free to be called
904 mmtk_buffer_obj_free_candidate(ractor_cache, (VALUE)alloc_obj);
905
906 objspace->total_allocated_objects++;
907
908 return (VALUE)alloc_obj;
909}
910
911size_t
912rb_gc_impl_obj_slot_size(VALUE obj)
913{
914 return ((VALUE *)obj)[-1];
915}
916
917size_t
918rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
919{
920 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
921 if (size == heap_sizes[i]) return i;
922 if (size < heap_sizes[i]) return i;
923 }
924
925 rb_bug("size too big");
926}
927
928bool
929rb_gc_impl_size_allocatable_p(size_t size)
930{
931 return size <= MMTK_MAX_OBJ_SIZE;
932}
933
934// Malloc
935void *
936rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
937{
938 // TODO: don't use system malloc
939 return malloc(size);
940}
941
942void *
943rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
944{
945 // TODO: don't use system calloc
946 return calloc(1, size);
947}
948
949void *
950rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
951{
952 // TODO: don't use system realloc
953 return realloc(ptr, new_size);
954}
955
956void
957rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
958{
959 // TODO: don't use system free
960 free(ptr);
961}
962
963void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }
964
965// Marking
966static inline VALUE
967rb_mmtk_call_object_closure(VALUE obj, bool pin)
968{
969 if (RB_UNLIKELY(RB_BUILTIN_TYPE(obj) == T_NONE)) {
970 const size_t info_size = 256;
971 char obj_info_buf[info_size];
972 rb_raw_obj_info(obj_info_buf, info_size, obj);
973
974 char parent_obj_info_buf[info_size];
975 rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);
976
977 rb_mmtk_gc_thread_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
978 }
979
980 return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
981 rb_mmtk_gc_thread_tls->object_closure.rust_closure,
982 rb_mmtk_gc_thread_tls->gc_context,
983 (MMTk_ObjectReference)obj,
984 pin
985 );
986}
987
988void
989rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
990{
991 if (RB_SPECIAL_CONST_P(obj)) return;
992
993 rb_mmtk_call_object_closure(obj, false);
994}
995
996void
997rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
998{
999 if (RB_SPECIAL_CONST_P(*ptr)) return;
1000
1001 VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
1002 if (new_obj != *ptr) {
1003 *ptr = new_obj;
1004 }
1005}
1006
1007void
1008rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
1009{
1010 if (RB_SPECIAL_CONST_P(obj)) return;
1011
1012 rb_mmtk_call_object_closure(obj, true);
1013}
1014
1015void
1016rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
1017{
1018 if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
1019 rb_gc_impl_mark_and_pin(objspace_ptr, obj);
1020 }
1021}
1022
1023void
1024rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
1025{
1026 RB_FL_SET(obj, RUBY_FL_WEAK_REFERENCE);
1027 mmtk_declare_weak_references((MMTk_ObjectReference)obj);
1028}
1029
1030bool
1031rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
1032{
1033 return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
1034}
1035
1036// Compaction
1037void
1038rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
1039{
1040 mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
1041}
1042
1043bool
1044rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
1045{
1046 return rb_mmtk_call_object_closure(obj, false) != obj;
1047}
1048
1049VALUE
1050rb_gc_impl_location(void *objspace_ptr, VALUE obj)
1051{
1052 return rb_mmtk_call_object_closure(obj, false);
1053}
1054
1055// Write barriers
1056void
1057rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
1058{
1059 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
1060
1061 if (SPECIAL_CONST_P(b)) return;
1062
1063#ifdef MMTK_DEBUG
1064 if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)a)) {
1065 char buff[256];
1066 rb_bug("a: %s is not an object", rb_raw_obj_info(buff, 256, a));
1067 }
1068
1069 if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)b)) {
1070 char buff[256];
1071 rb_bug("b: %s is not an object", rb_raw_obj_info(buff, 256, b));
1072 }
1073#endif
1074
1075 MMTK_ASSERT(BUILTIN_TYPE(a) != T_NONE);
1076 MMTK_ASSERT(BUILTIN_TYPE(b) != T_NONE);
1077
1078 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
1079}
1080
1081void
1082rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
1083{
1084 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
1085}
1086
1087void
1088rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
1089{
1090 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
1091
1092 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
1093}
1094
1095// Heap walking
1096static void
1097each_objects_i(MMTk_ObjectReference obj, void *d)
1098{
1099 rb_darray(VALUE) *objs = d;
1100
1101 rb_darray_append(objs, (VALUE)obj);
1102}
1103
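// Heap walking: snapshot every object into a darray via mmtk_enumerate_objects, then run
// the callback outside the enumeration, re-checking mmtk_is_mmtk_object on each entry
// (presumably because the callback itself may free objects).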
1104static void
1105each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
1106{
1107 rb_darray(VALUE) objs;
1108 rb_darray_make(&objs, 0);
1109
1110 mmtk_enumerate_objects(each_objects_i, &objs);
1111
1112 VALUE *obj_ptr;
1113 rb_darray_foreach(objs, i, obj_ptr) {
1114 if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;
1115
1116 if (func(*obj_ptr, data) != 0) {
1117 break;
1118 }
1119 }
1120
1121 rb_darray_free(objs);
1122}
1123
1124struct rb_gc_impl_each_objects_data {
1125 int (*func)(void *, void *, size_t, void *);
1126 void *data;
1127};
1128
1129static int
1130rb_gc_impl_each_objects_i(VALUE obj, void *d)
1131{
1132 struct rb_gc_impl_each_objects_data *data = d;
1133
1134 size_t slot_size = rb_gc_impl_obj_slot_size(obj);
1135
1136 return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
1137}
1138
1139void
1140rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
1141{
1142 struct rb_gc_impl_each_objects_data each_objects_data = {
1143 .func = func,
1144 .data = data
1145 };
1146
1147 each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
1148}
1149
1150struct rb_gc_impl_each_object_data {
1151 void (*func)(VALUE, void *);
1152 void *data;
1153};
1154
1155static int
1156rb_gc_impl_each_object_i(VALUE obj, void *d)
1157{
1158 struct rb_gc_impl_each_object_data *data = d;
1159
1160 data->func(obj, data->data);
1161
1162 return 0;
1163}
1164
1165void
1166rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
1167{
1168 struct rb_gc_impl_each_object_data each_object_data = {
1169 .func = func,
1170 .data = data
1171 };
1172
1173 each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
1174}
1175
1176// Finalizers
1177static VALUE
1178gc_run_finalizers_get_final(long i, void *data)
1179{
1180 VALUE table = (VALUE)data;
1181
1182 return RARRAY_AREF(table, i + 1);
1183}
1184
1185static void
1186gc_run_finalizers(void *data)
1187{
1188 struct objspace *objspace = data;
1189
1190 rb_gc_set_pending_interrupt();
1191
1192 while (objspace->finalizer_jobs != NULL) {
1193 struct MMTk_final_job *job = objspace->finalizer_jobs;
1194 objspace->finalizer_jobs = job->next;
1195
1196 switch (job->kind) {
1197 case MMTK_FINAL_JOB_DFREE:
1198 job->as.dfree.func(job->as.dfree.data);
1199 break;
1200 case MMTK_FINAL_JOB_FINALIZE: {
1201 VALUE finalizer_array = job->as.finalize.finalizer_array;
1202
1203 rb_gc_run_obj_finalizer(
1204 RARRAY_AREF(finalizer_array, 0),
1205 RARRAY_LEN(finalizer_array) - 1,
1206 gc_run_finalizers_get_final,
1207 (void *)finalizer_array
1208 );
1209
1210 RB_GC_GUARD(finalizer_array);
1211 break;
1212 }
1213 }
1214
1215 xfree(job);
1216 }
1217
1218 rb_gc_unset_pending_interrupt();
1219}
1220
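// Queues a dfree callback as a MMTK_FINAL_JOB_DFREE job. The CAS loop pushes onto the
// lock-free finalizer_jobs list, so this is safe from GC worker threads running obj_free;
// the postponed job later drains the list on a Ruby thread via gc_run_finalizers (skipped
// when freeing everything at exit).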
1221void
1222rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
1223{
1224 if (dfree == NULL) return;
1225
1226 struct objspace *objspace = objspace_ptr;
1227
1228 struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
1229 job->kind = MMTK_FINAL_JOB_DFREE;
1230 job->as.dfree.func = dfree;
1231 job->as.dfree.data = data;
1232
1233 struct MMTk_final_job *prev;
1234 do {
1235 job->next = objspace->finalizer_jobs;
1236 prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
1237 } while (prev != job->next);
1238
1239 if (!ruby_free_at_exit_p()) {
1240 rb_postponed_job_trigger(objspace->finalizer_postponed_job);
1241 }
1242}
1243
1244VALUE
1245rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
1246{
1247 struct objspace *objspace = objspace_ptr;
1248 VALUE table;
1249 st_data_t data;
1250
1251 RBASIC(obj)->flags |= FL_FINALIZE;
1252
1253 int lev = RB_GC_VM_LOCK();
1254
1255 if (st_lookup(objspace->finalizer_table, obj, &data)) {
1256 table = (VALUE)data;
1257
1258 /* avoid duplicate block, table is usually small */
1259 {
1260 long len = RARRAY_LEN(table);
1261 long i;
1262
1263 for (i = 0; i < len; i++) {
1264 VALUE recv = RARRAY_AREF(table, i);
1265 if (rb_equal(recv, block)) {
1266 RB_GC_VM_UNLOCK(lev);
1267 return recv;
1268 }
1269 }
1270 }
1271
1272 rb_ary_push(table, block);
1273 }
1274 else {
1275 table = rb_ary_new3(2, rb_obj_id(obj), block);
1276 rb_obj_hide(table);
1277 st_add_direct(objspace->finalizer_table, obj, table);
1278 }
1279
1280 RB_GC_VM_UNLOCK(lev);
1281
1282 return block;
1283}
1284
1285void
1286rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1287{
1288 struct objspace *objspace = objspace_ptr;
1289
1290 st_data_t data = obj;
1291
1292 int lev = RB_GC_VM_LOCK();
1293 st_delete(objspace->finalizer_table, &data, 0);
1294 RB_GC_VM_UNLOCK(lev);
1295
1296 FL_UNSET(obj, FL_FINALIZE);
1297}
1298
1299void
1300rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
1301{
1302 struct objspace *objspace = objspace_ptr;
1303 VALUE table;
1304 st_data_t data;
1305
1306 if (!FL_TEST(obj, FL_FINALIZE)) return;
1307
1308 int lev = RB_GC_VM_LOCK();
1309 if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
1310 table = rb_ary_dup((VALUE)data);
1311 RARRAY_ASET(table, 0, rb_obj_id(dest));
1312 st_insert(objspace->finalizer_table, dest, table);
1313 FL_SET(dest, FL_FINALIZE);
1314 }
1315 else {
1316 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
1317 }
1318 RB_GC_VM_UNLOCK(lev);
1319}
1320
1321static int
1322move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1323{
1324 struct objspace *objspace = (struct objspace *)arg;
1325
1326 make_final_job(objspace, (VALUE)key, (VALUE)val);
1327
1328 return ST_DELETE;
1329}
1330
1331void
1332rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
1333{
1334 struct objspace *objspace = objspace_ptr;
1335
1336 while (objspace->finalizer_table->num_entries) {
1337 st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);
1338
1339 gc_run_finalizers(objspace);
1340 }
1341
1342 unsigned int lev = RB_GC_VM_LOCK();
1343 {
1344 struct MMTk_ractor_cache *rc;
1345 ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
1346 mmtk_flush_obj_free_buffer(rc);
1347 }
1348
1349 struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
1350 for (size_t i = 0; i < registered_candidates.len; i++) {
1351 VALUE obj = (VALUE)registered_candidates.ptr[i];
1352
1353 if (rb_gc_shutdown_call_finalizer_p(obj)) {
1354 rb_gc_obj_free(objspace_ptr, obj);
1355 RBASIC(obj)->flags = 0;
1356 }
1357 }
1358 mmtk_free_raw_vec_of_obj_ref(registered_candidates);
1359 }
1360 RB_GC_VM_UNLOCK(lev);
1361
1362 gc_run_finalizers(objspace);
1363}
1364
1365// Forking
1366
1367void
1368rb_gc_impl_before_fork(void *objspace_ptr)
1369{
1370 struct objspace *objspace = objspace_ptr;
1371
1372 retry:
1373 objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
1374 rb_gc_vm_barrier();
1375
1376 /* At this point, we know that all the Ractors are paused because of the
1377 * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
1378 * one or more Ractors could be paused there. However, mmtk_before_fork is
1379 * not compatible with that because it assumes that the MMTk workers are idle,
1380 * but the workers are not idle because they are busy working on a GC.
1381 *
1382 * This essentially implements a trylock. It will optimistically lock but will
1383 * release the lock if it detects that any other Ractors are waiting in
1384 * rb_mmtk_block_for_gc.
1385 */
1386 rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
1387 if (mutator_blocking_count != 0) {
1388 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1389 goto retry;
1390 }
1391
1392 mmtk_before_fork();
1393}
1394
1395void
1396rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1397{
1398 struct objspace *objspace = objspace_ptr;
1399
1400 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1401
1402 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1403}
1404
1405// Statistics
1406
1407void
1408rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1409{
1410 struct objspace *objspace = objspace_ptr;
1411
1412 objspace->measure_gc_time = RTEST(flag);
1413}
1414
1415bool
1416rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1417{
1418 struct objspace *objspace = objspace_ptr;
1419
1420 return objspace->measure_gc_time;
1421}
1422
1423unsigned long long
1424rb_gc_impl_get_total_time(void *objspace_ptr)
1425{
1426 struct objspace *objspace = objspace_ptr;
1427
1428 return objspace->total_gc_time;
1429}
1430
1431size_t
1432rb_gc_impl_gc_count(void *objspace_ptr)
1433{
1434 struct objspace *objspace = objspace_ptr;
1435
1436 return objspace->gc_count;
1437}
1438
1439VALUE
1440rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
1441{
1442 VALUE hash = Qnil, key = Qnil;
1443
1444 if (SYMBOL_P(hash_or_key)) {
1445 key = hash_or_key;
1446 }
1447 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
1448 hash = hash_or_key;
1449 }
1450 else {
1451 rb_bug("gc_info_decode: non-hash or symbol given");
1452 }
1453
1454#define SET(name, attr) \
1455 if (key == ID2SYM(rb_intern_const(#name))) \
1456 return (attr); \
1457 else if (hash != Qnil) \
1458 rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));
1459
1460 /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
1461 * the :state key and expects a result. This always returns the :none state. */
1462 SET(state, ID2SYM(rb_intern_const("none")));
1463#undef SET
1464
1465 if (!NIL_P(key)) {
1466 // Matched key should return above
1467 return Qundef;
1468 }
1469
1470 return hash;
1471}
1472
1473enum gc_stat_sym {
1474 gc_stat_sym_count,
1475 gc_stat_sym_moving_gc_count,
1476 gc_stat_sym_time,
1477 gc_stat_sym_total_allocated_objects,
1478 gc_stat_sym_total_bytes,
1479 gc_stat_sym_used_bytes,
1480 gc_stat_sym_free_bytes,
1481 gc_stat_sym_starting_heap_address,
1482 gc_stat_sym_last_heap_address,
1483 gc_stat_sym_weak_references_count,
1484 gc_stat_sym_last
1485};
1486
1487static VALUE gc_stat_symbols[gc_stat_sym_last];
1488
1489static void
1490setup_gc_stat_symbols(void)
1491{
1492 if (gc_stat_symbols[0] == 0) {
1493#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
1494 S(count);
1495 S(moving_gc_count);
1496 S(time);
1497 S(total_allocated_objects);
1498 S(total_bytes);
1499 S(used_bytes);
1500 S(free_bytes);
1501 S(starting_heap_address);
1502 S(last_heap_address);
1503 S(weak_references_count);
1504 }
1505}
1506
1507VALUE
1508rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
1509{
1510 struct objspace *objspace = objspace_ptr;
1511 VALUE hash = Qnil, key = Qnil;
1512
1513 setup_gc_stat_symbols();
1514
1515 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1516 hash = hash_or_sym;
1517 }
1518 else if (SYMBOL_P(hash_or_sym)) {
1519 key = hash_or_sym;
1520 }
1521 else {
1522 rb_bug("non-hash or symbol given");
1523 }
1524
1525#define SET(name, attr) \
1526 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
1527 return SIZET2NUM(attr); \
1528 else if (hash != Qnil) \
1529 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
1530
1531 SET(count, objspace->gc_count);
1532 SET(moving_gc_count, objspace->moving_gc_count);
1533 SET(time, objspace->total_gc_time / (1000 * 1000));
1534 SET(total_allocated_objects, objspace->total_allocated_objects);
1535 SET(total_bytes, mmtk_total_bytes());
1536 SET(used_bytes, mmtk_used_bytes());
1537 SET(free_bytes, mmtk_free_bytes());
1538 SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
1539 SET(last_heap_address, (size_t)mmtk_last_heap_address());
1540 SET(weak_references_count, mmtk_weak_references_count());
1541#undef SET
1542
1543 if (!NIL_P(key)) {
1544 // Matched key should return above
1545 return Qundef;
1546 }
1547
1548 return hash;
1549}
1550
1551VALUE
1552rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
1553{
1554 if (FIXNUM_P(heap_name) && SYMBOL_P(hash_or_sym)) {
1555 int heap_idx = FIX2INT(heap_name);
1556 if (heap_idx < 0 || heap_idx >= MMTK_HEAP_COUNT) {
1557 rb_raise(rb_eArgError, "size pool index out of range");
1558 }
1559
1560 if (hash_or_sym == ID2SYM(rb_intern("slot_size"))) {
1561 return SIZET2NUM(heap_sizes[heap_idx]);
1562 }
1563
1564 return Qundef;
1565 }
1566
1567 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
1568 return hash_or_sym;
1569 }
1570
1571 return Qundef;
1572}
1573
1574// Miscellaneous
1575
1576#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
1577static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1578
1579struct rb_gc_object_metadata_entry *
1580rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1581{
1582 static ID ID_object_id;
1583
1584 if (!ID_object_id) {
1585#define I(s) ID_##s = rb_intern(#s);
1586 I(object_id);
1587#undef I
1588 }
1589
1590 size_t n = 0;
1591
1592#define SET_ENTRY(na, v) do { \
1593 MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1594 object_metadata_entries[n].name = ID_##na; \
1595 object_metadata_entries[n].val = v; \
1596 n++; \
1597} while (0)
1598
1599 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1600
1601 object_metadata_entries[n].name = 0;
1602 object_metadata_entries[n].val = 0;
1603
1604 return object_metadata_entries;
1605}
1606
1607bool
1608rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1609{
1610 if (ptr == NULL) return false;
1611 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1612 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1613}
1614
1615bool
1616rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
1617{
1618 return false;
1619}
1620
1621void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1622
1623void
1624rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
1625{
1626 if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
1627 rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
1628 }
1629
1630 rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
1631}
1632
1633// GC Identification
1634
1635const char *
1636rb_gc_impl_active_gc_name(void)
1637{
1638 return "mmtk";
1639}