Ruby 4.1.0dev (2025-12-30 revision 966dbba8db970f13065a35d893662a981d0abae5)
mmtk.c
#include <pthread.h>
#include <stdbool.h>

#include "ruby/assert.h"
#include "ruby/atomic.h"
#include "ruby/debug.h"

#include "gc/gc.h"
#include "gc/gc_impl.h"
#include "gc/mmtk/mmtk.h"

#include "ccan/list/list.h"
#include "darray.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#endif

struct objspace {
    bool measure_gc_time;
    bool gc_stress;

    size_t gc_count;
    size_t total_gc_time;
    size_t total_allocated_objects;

    st_table *finalizer_table;
    struct MMTk_final_job *finalizer_jobs;
    rb_postponed_job_handle_t finalizer_postponed_job;

    struct ccan_list_head ractor_caches;
    unsigned long live_ractor_cache_count;

    pthread_mutex_t mutex;
    rb_atomic_t mutator_blocking_count;
    bool world_stopped;
    pthread_cond_t cond_world_stopped;
    pthread_cond_t cond_world_started;
    size_t start_the_world_count;

    struct rb_gc_vm_context vm_context;

    unsigned int fork_hook_vm_lock_lev;
};
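/* In struct objspace above, `mutex` guards the stop-the-world handshake state
 * (`world_stopped` plus the two condition variables): a GC worker waits on
 * `cond_world_stopped` until a mutator sets `world_stopped`, and mutators wait
 * on `cond_world_started` until rb_mmtk_resume_mutators clears it again.
 * `mutator_blocking_count` is read by the fork hooks to detect Ractors parked
 * in rb_mmtk_block_for_gc. */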

struct MMTk_ractor_cache {
    struct ccan_list_node list_node;

    MMTk_Mutator *mutator;
    bool gc_mutator_p;

    MMTk_BumpPointer *bump_pointer;
};

struct MMTk_final_job {
    struct MMTk_final_job *next;
    enum {
        MMTK_FINAL_JOB_DFREE,
        MMTK_FINAL_JOB_FINALIZE,
    } kind;
    union {
        struct {
            void (*func)(void *);
            void *data;
        } dfree;
        struct {
            /* HACK: we store the object ID in the 0th element of this array. */
            VALUE finalizer_array;
        } finalize;
    } as;
};

#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
#else
# error We currently need language-supported TLS
#endif

#include <pthread.h>

static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}

static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}

static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
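/*
 * Illustrative timeline of the stop-the-world handshake (a sketch of how the
 * functions above and below pair up, not additional runtime behavior):
 *
 *   mutator (rb_mmtk_block_for_gc)       GC worker (rb_mmtk_stop_the_world)
 *   ------------------------------       ----------------------------------
 *   lock(mutex)                          lock(mutex)
 *   world_stopped = true                 while (!world_stopped)
 *   broadcast(cond_world_stopped)  --->      wait(cond_world_stopped)
 *   while (world_stopped)                unlock(mutex)
 *       wait(cond_world_started)         ... GC runs ...
 *                                        (rb_mmtk_resume_mutators)
 *                                        world_stopped = false
 *   unlock(mutex)                  <---  broadcast(cond_world_started)
 */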

static void
rb_mmtk_resume_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}

static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        objspace->world_stopped = true;

        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
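/* The `gc_count == starting_gc_count` check above makes this block effectively
 * run once per collection: if several Ractors race into rb_mmtk_block_for_gc
 * for the same GC, only the first one through RB_GC_VM_LOCK becomes the "GC
 * mutator" and performs the handshake; the others, still waiting on the lock,
 * later observe that rb_mmtk_resume_mutators already bumped gc_count for this
 * cycle and simply fall through. */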

static void
rb_mmtk_before_updating_jit_code(void)
{
    rb_gc_before_updating_jit_code();
}

static void
rb_mmtk_after_updating_jit_code(void)
{
    rb_gc_after_updating_jit_code();
}

static size_t
rb_mmtk_number_of_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();
    return objspace->live_ractor_cache_count;
}

static void
rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
{
    struct objspace *objspace = rb_gc_get_objspace();
    struct MMTk_ractor_cache *ractor_cache;

    ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
        visit_mutator(ractor_cache->mutator, data);
    }
}

static void
rb_mmtk_scan_gc_roots(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
    // See: https://github.com/ruby/mmtk/issues/22
    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
    rb_gc_mark_roots(objspace, NULL);
    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}

static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}
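/* Marking each pending job's finalizer_array above is what keeps the finalizer
 * procs, and the object ID stashed at index 0, alive between the moment the
 * owning object died and the moment gc_run_finalizers drains the job list. */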

static void
rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
{
    rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
}

static void
rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
{
    VALUE object = (VALUE)mmtk_object;

    if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }
}

static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

static void
rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
{
    VALUE object = (VALUE)mmtk_object;

    rb_gc_handle_weak_references(object);

    if (moving) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }
}

static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);
}

static size_t
rb_mmtk_vm_live_bytes(void)
{
    return 0;
}

static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)table));
    RUBY_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    RB_FL_UNSET(obj, RUBY_FL_FINALIZE);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}
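/* Layout reminder: `table` is the hidden Array built in
 * rb_gc_impl_define_finalizer, i.e. [object_id, finalizer_1, finalizer_2, ...],
 * which is why gc_run_finalizers reads the object ID from index 0 and the
 * callables from index i + 1. */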

static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data)
{
    RUBY_ASSERT(RB_FL_TEST((VALUE)key, RUBY_FL_FINALIZE));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    RUBY_ASSERT(RB_BUILTIN_TYPE((VALUE)value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // TODO: replace with st_foreach_with_replace when GC is moving
    st_foreach(objspace->finalizer_table, rb_mmtk_update_finalizer_table_i, (st_data_t)objspace);
}

static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}

static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);

static int
rb_mmtk_update_global_tables_i(VALUE val, void *data)
{
    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    // TODO: check only if in moving GC
    if (rb_mmtk_call_object_closure(val, false) != val) {
        return ST_REPLACE;
    }

    return ST_CONTINUE;
}

static int
rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
{
    // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
    *ptr = rb_mmtk_call_object_closure(*ptr, false);

    return ST_CONTINUE;
}

static void
rb_mmtk_update_global_tables(int table)
{
    RUBY_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    // TODO: set weak_only to true for non-moving GC
    rb_gc_vm_weak_table_foreach(
        rb_mmtk_update_global_tables_i,
        rb_mmtk_update_global_tables_replace_i,
        NULL,
        false,
        (enum rb_gc_vm_weak_tables)table
    );
}
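/* Two-phase weak-table update: the foreach callback above decides per entry
 * (ST_DELETE for dead objects, ST_REPLACE for moved ones), and only entries
 * that answered ST_REPLACE are handed to the replace callback, which rewrites
 * the slot in place with the object's forwarded address. */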

static bool
rb_mmtk_special_const_p(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;

    return RB_SPECIAL_CONST_P(obj);
}

static void
rb_mmtk_mutator_thread_panic_handler(void)
{
    rb_bug("Ruby mutator thread panicked");
}

// Bootup
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_before_updating_jit_code,
    rb_mmtk_after_updating_jit_code,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_move_obj_during_marking,
    rb_mmtk_update_object_references,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_handle_weak_references,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
    rb_mmtk_mutator_thread_panic_handler,
};
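/* Note that this is a positional initializer: the order of the function
 * pointers above must match the field order of MMTk_RubyUpcalls as declared
 * in mmtk.h, since the Rust side of the binding calls through this table. */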

// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};

MMTk_Builder *
rb_mmtk_builder_init(void)
{
    MMTk_Builder *builder = mmtk_builder_default();
    return builder;
}

void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls);

    return calloc(1, sizeof(struct objspace));
}

static void gc_run_finalizers(void *data);

void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}

void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}

void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = malloc(sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);
    cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);

    return cache;
}

void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    RUBY_ASSERT(objspace->live_ractor_cache_count > 1);
    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}

void rb_gc_impl_set_params(void *objspace_ptr) { }

static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }

#define MMTK_HEAP_COUNT 6
#define MMTK_MAX_OBJ_SIZE 640

static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 40, 80, 160, 320, MMTK_MAX_OBJ_SIZE, 0
};
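/* Size-class rounding example: a request for 100 bytes matches no entry
 * exactly, so rb_gc_impl_new_obj rounds it up to the next class, 160, and
 * rb_gc_impl_heap_id_for_size(objspace, 160) answers heap id 3. Requests
 * above MMTK_MAX_OBJ_SIZE (640) are rejected outright. */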

void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}

size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}

int
rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
{
    struct objspace *objspace = data;

    if (!RB_TYPE_P(obj, T_NONE)) {
        rb_gc_obj_free_vm_weak_references(obj);
        rb_gc_obj_free(objspace, obj);
    }

    return 0;
}

// Shutdown
static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);

void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}

// GC
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(), true, full_mark);
}

bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    // TODO
    return false;
}

static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}

void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}

void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}

void
rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->gc_stress = RTEST(flag);
}

VALUE
rb_gc_impl_stress_get(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_stress ? Qtrue : Qfalse;
}

VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
    size_t heap_min = mmtk_heap_min();
    if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
    rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));

    return hash;
}

void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}

// Object allocation

static VALUE
rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
{
    MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
    if (bump_pointer == NULL) return 0;

    uintptr_t new_cursor = bump_pointer->cursor + size;

    if (new_cursor > bump_pointer->limit) {
        return 0;
    }
    else {
        VALUE obj = (VALUE)bump_pointer->cursor;
        bump_pointer->cursor = new_cursor;
        return obj;
    }
}
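/* Bump-pointer example: with cursor = 0x1000 and limit = 0x2000, a 64-byte
 * request returns 0x1000 and advances the cursor to 0x1040. Once cursor + size
 * would exceed the limit, the function returns 0 and rb_gc_impl_new_obj falls
 * back to the slow path, mmtk_alloc. */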

VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    alloc_size += sizeof(VALUE);

    VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
    if (!alloc_obj) {
        alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    }

    alloc_obj++;
    alloc_obj[-1] = alloc_size - sizeof(VALUE);
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;

    // TODO: implement fast path for mmtk_post_alloc
    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_add_obj_free_candidate(alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}
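/* Resulting memory layout (one extra VALUE-sized header word is prepended to
 * every allocation):
 *
 *   raw MMTk allocation:  [ slot size | flags | klass | payload ... ]
 *                                     ^
 *                                     the VALUE returned to Ruby
 *
 * rb_gc_impl_obj_slot_size reads that hidden word back via ((VALUE *)obj)[-1]. */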

size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return ((VALUE *)obj)[-1];
}

size_t
rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
{
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (size == heap_sizes[i]) return i;
        if (size < heap_sizes[i]) return i;
    }

    rb_bug("size too big");
}

bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size <= MMTK_MAX_OBJ_SIZE;
}

// Malloc
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system malloc
    return malloc(size);
}

void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    // TODO: don't use system calloc
    return calloc(1, size);
}

void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}

void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}

void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }

// Marking
static inline VALUE
rb_mmtk_call_object_closure(VALUE obj, bool pin)
{
    return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
        rb_mmtk_gc_thread_tls->object_closure.rust_closure,
        rb_mmtk_gc_thread_tls->gc_context,
        (MMTk_ObjectReference)obj,
        pin
    );
}
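/* The object closure is the tracing callback MMTk installs in the worker's
 * TLS: calling it marks `obj` as live and returns its (possibly new) address.
 * The same helper therefore serves plain marking (result ignored), forwarding
 * lookup (result compared or returned, as in rb_gc_impl_location), and
 * pinning when `pin` is true. */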

void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, false);
}

void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
    if (new_obj != *ptr) {
        *ptr = new_obj;
    }
}

void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    rb_mmtk_call_object_closure(obj, true);
}

void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}
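/* mark_maybe is the conservative-marking entry point: `obj` may be any word
 * found on a machine stack, so it is first validated as a real heap object and
 * then pinned rather than moved, since an ambiguous root cannot be safely
 * rewritten if the object were relocated. */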

void
rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
{
    RB_FL_SET(obj, RUBY_FL_WEAK_REFERENCE);
    mmtk_declare_weak_references((MMTk_ObjectReference)obj);
}

bool
rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
{
    return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
}

// Compaction
void
rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
{
    mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
}

bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false) != obj;
}

VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE obj)
{
    return rb_mmtk_call_object_closure(obj, false);
}

// Write barriers
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    if (SPECIAL_CONST_P(b)) return;

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}
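/* Generational post-barrier: when a reference is stored into `a` (e.g. an old
 * array receiving a young element via RARRAY_ASET), MMTk is told about the
 * source object so it can remember `a` and rescan it during the next minor
 * collection. Stores of special constants need no barrier because they are
 * not heap references. */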

void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
}

void
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
}

// Heap walking
static void
each_objects_i(MMTk_ObjectReference obj, void *d)
{
    rb_darray(VALUE) *objs = d;

    rb_darray_append(objs, (VALUE)obj);
}

static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}
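/* The walk is split into two passes: mmtk_enumerate_objects only snapshots
 * object addresses into the darray, and `func` runs afterwards, outside the
 * enumeration. That way callbacks that free objects (such as
 * rb_mmtk_obj_free_iter_wrapper during shutdown) do not mutate the heap while
 * MMTk is still walking it, and the mmtk_is_mmtk_object re-check skips any
 * entry that died between the two passes. */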

struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);
    void *data;
};

static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}

void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}

struct rb_gc_impl_each_object_data {
    void (*func)(VALUE, void *);
    void *data;
};

static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}

void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}

// Finalizers
static VALUE
gc_run_finalizers_get_final(long i, void *data)
{
    VALUE table = (VALUE)data;

    return RARRAY_AREF(table, i + 1);
}

static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),
                RARRAY_LEN(finalizer_array) - 1,
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}

void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
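/* The loop above is a classic lock-free stack push: read the current head
 * into job->next, then CAS the head from that snapshot to the new job.
 * RUBY_ATOMIC_PTR_CAS returns the value the head actually held; if another
 * thread pushed in between, that value differs from our snapshot and the loop
 * retries with a fresh read. This lets GC worker threads queue dfree jobs
 * concurrently with mutators. */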

VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    int lev = RB_GC_VM_LOCK();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    RB_GC_VM_UNLOCK(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(2, rb_obj_id(obj), block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    RB_GC_VM_UNLOCK(lev);

    return block;
}
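/* Ruby-level view: ObjectSpace.define_finalizer(obj, proc) lands here. The
 * hidden table array is [rb_obj_id(obj), block, ...]; storing the ID rather
 * than obj itself is what allows the finalizers to outlive the object and
 * still be invoked with its ID after it dies. */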

void
rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;

    st_data_t data = obj;

    int lev = RB_GC_VM_LOCK();
    st_delete(objspace->finalizer_table, &data, 0);
    RB_GC_VM_UNLOCK(lev);

    FL_UNSET(obj, FL_FINALIZE);
}

void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = rb_ary_dup((VALUE)data);
        RARRAY_ASET(table, 0, rb_obj_id(dest));
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
    RB_GC_VM_UNLOCK(lev);
}

static int
move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct objspace *objspace = (struct objspace *)arg;

    make_final_job(objspace, (VALUE)key, (VALUE)val);

    return ST_DELETE;
}

void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    gc_run_finalizers(objspace);
}

// Forking

void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

  retry:
    objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
    rb_gc_vm_barrier();

    /* At this point, we know that all the Ractors are paused because of the
     * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
     * one or more Ractors could be paused there. However, mmtk_before_fork is
     * not compatible with that because it assumes that the MMTk workers are idle,
     * but the workers are not idle because they are busy working on a GC.
     *
     * This essentially implements a trylock. It will optimistically lock but will
     * release the lock if it detects that any other Ractors are waiting in
     * rb_mmtk_block_for_gc.
     */
    rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
    if (mutator_blocking_count != 0) {
        RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
        goto retry;
    }

    mmtk_before_fork();
}

void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    struct objspace *objspace = objspace_ptr;

    mmtk_after_fork(rb_gc_get_ractor_newobj_cache());

    RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
}

// Statistics

void
rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = RTEST(flag);
}

bool
rb_gc_impl_get_measure_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->measure_gc_time;
}

unsigned long long
rb_gc_impl_get_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->total_gc_time;
}

size_t
rb_gc_impl_gc_count(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_count;
}

VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}

enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];

static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
    }
}

VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
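/* Ruby-level usage: GC.stat returns all of the keys above in a Hash, while
 * GC.stat(:used_bytes) takes the early-return path in SET and yields a single
 * Integer. Note that :time is reported in milliseconds (total_gc_time is kept
 * in nanoseconds and divided by 1,000,000 here). */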

VALUE
rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
{
    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        return hash_or_sym;
    }
    else {
        return Qundef;
    }
}

// Miscellaneous

#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];

struct rb_gc_object_metadata_entry *
rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
{
    static ID ID_object_id;

    if (!ID_object_id) {
#define I(s) ID_##s = rb_intern(#s);
        I(object_id);
#undef I
    }

    size_t n = 0;

#define SET_ENTRY(na, v) do { \
    RUBY_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
    object_metadata_entries[n].name = ID_##na; \
    object_metadata_entries[n].val = v; \
    n++; \
} while (0)

    if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));

    object_metadata_entries[n].name = 0;
    object_metadata_entries[n].val = 0;

    return object_metadata_entries;
}

bool
rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
{
    if (ptr == NULL) return false;
    if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
    return mmtk_is_mmtk_object((MMTk_Address)ptr);
}

bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    return false;
}

void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }

void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}

// GC Identification

const char *
rb_gc_impl_active_gc_name(void)
{
    return "mmtk";
}