Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
mmtk.c
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "ruby/assert.h"
#include "ruby/atomic.h"
#include "ruby/debug.h"

#include "gc/gc.h"
#include "gc/gc_impl.h"
#include "gc/mmtk/mmtk.h"

#include "ccan/list/list.h"
#include "darray.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#endif

struct objspace {
    bool measure_gc_time;
    bool gc_stress;

    size_t gc_count;
    size_t total_gc_time;
    size_t total_allocated_objects;

    st_table *id_to_obj_tbl;
    st_table *obj_to_id_tbl;
    unsigned long long next_object_id;

    st_table *finalizer_table;
    struct MMTk_final_job *finalizer_jobs;
    rb_postponed_job_handle_t finalizer_postponed_job;

    struct ccan_list_head ractor_caches;
    unsigned long live_ractor_cache_count;

    pthread_mutex_t mutex;
    bool world_stopped;
    pthread_cond_t cond_world_stopped;
    pthread_cond_t cond_world_started;
    size_t start_the_world_count;

    struct rb_gc_vm_context vm_context;
};

struct MMTk_ractor_cache {
    struct ccan_list_node list_node;

    MMTk_Mutator *mutator;
    bool gc_mutator_p;
};

struct MMTk_final_job {
    struct MMTk_final_job *next;
    enum {
        MMTK_FINAL_JOB_DFREE,
        MMTK_FINAL_JOB_FINALIZE,
    } kind;
    union {
        struct {
            void (*func)(void *);
            void *data;
        } dfree;
        struct {
            VALUE object_id;
            VALUE finalizer_array;
        } finalize;
    } as;
};

#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
#else
# error We currently need language-supported TLS
#endif

static void
rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
{
    rb_mmtk_gc_thread_tls = gc_thread_tls;
}

static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p();
}

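/*
 * Stop-the-world handshake between mutators and the GC thread, guarded by
 * objspace->mutex:
 *
 *   - rb_mmtk_block_for_gc (mutator): sets world_stopped, broadcasts
 *     cond_world_stopped, then sleeps on cond_world_started.
 *   - rb_mmtk_stop_the_world (GC): waits on cond_world_stopped until a
 *     mutator has parked the world.
 *   - rb_mmtk_resume_mutators (GC): clears world_stopped and broadcasts
 *     cond_world_started to release the sleeping mutators.
 */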
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}

static void
rb_mmtk_resume_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}

static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    int lock_lev = rb_gc_vm_lock();
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // If the count changed, another mutator already drove this GC cycle
    // while we were acquiring the locks, so there is nothing left to do.
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        objspace->world_stopped = true;

        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            // total_gc_time is accumulated in nanoseconds.
            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    rb_gc_vm_unlock(lock_lev);
}

static size_t
rb_mmtk_number_of_mutators(void)
{
    struct objspace *objspace = rb_gc_get_objspace();
    return objspace->live_ractor_cache_count;
}

static void
rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
{
    struct objspace *objspace = rb_gc_get_objspace();

    struct MMTk_ractor_cache *ractor_cache;
    ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
        RUBY_ASSERT(ractor_cache != NULL);
        visit_mutator(ractor_cache->mutator, data);
    }
}

static void
rb_mmtk_scan_gc_roots(void)
{
    // rb_gc_mark_roots(rb_gc_get_objspace(), NULL);
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}

static void
rb_mmtk_scan_objspace(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    if (objspace->finalizer_table != NULL) {
        st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
    }

    st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);

    struct MMTk_final_job *job = objspace->finalizer_jobs;
    while (job != NULL) {
        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            break;
          case MMTK_FINAL_JOB_FINALIZE:
            rb_gc_impl_mark(objspace, job->as.finalize.object_id);
            rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
            break;
          default:
            rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
        }

        job = job->next;
    }
}

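/*
 * Note that queued finalizer jobs act as extra roots: the object_id and
 * finalizer array captured by each MMTK_FINAL_JOB_FINALIZE job are marked
 * in rb_mmtk_scan_objspace above so they survive until gc_run_finalizers
 * consumes the job.
 */
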
static void
rb_mmtk_scan_roots_in_mutator_thread(MMTk_VMMutatorThread mutator, MMTk_VMWorkerThread worker)
{
    if (mutator->gc_mutator_p) {
        struct objspace *objspace = rb_gc_get_objspace();

        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_mark_roots(objspace, NULL);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }
}

static void
rb_mmtk_scan_object_ruby_style(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

static void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
    rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
}

static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
    }

    rb_gc_obj_free(objspace, obj);
}

static size_t
rb_mmtk_vm_live_bytes(void)
{
    return 0;
}

static void
make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
{
    RUBY_ASSERT(FL_TEST(obj, FL_FINALIZE));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)table));
    RUBY_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);

    FL_UNSET(obj, FL_FINALIZE);

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->next = objspace->finalizer_jobs;
    job->kind = MMTK_FINAL_JOB_FINALIZE;
    job->as.finalize.object_id = rb_obj_id((VALUE)obj);
    job->as.finalize.finalizer_array = table;

    objspace->finalizer_jobs = job;
}

static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data)
{
    RUBY_ASSERT(FL_TEST((VALUE)key, FL_FINALIZE));
    RUBY_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    RUBY_ASSERT(RB_BUILTIN_TYPE((VALUE)value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static void
rb_mmtk_update_finalizer_table(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    // TODO: replace with st_foreach_with_replace when GC is moving
    st_foreach(objspace->finalizer_table, rb_mmtk_update_finalizer_table_i, (st_data_t)objspace);
}

static int
rb_mmtk_update_table_i(VALUE val, void *data)
{
    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static int
rb_mmtk_update_obj_id_tables_obj_to_id_i(st_data_t key, st_data_t val, st_data_t data)
{
    RUBY_ASSERT(FL_TEST((VALUE)key, FL_SEEN_OBJ_ID));

    if (!mmtk_is_reachable((MMTk_ObjectReference)key)) {
        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static int
rb_mmtk_update_obj_id_tables_id_to_obj_i(st_data_t key, st_data_t val, st_data_t data)
{
    RUBY_ASSERT(FL_TEST((VALUE)val, FL_SEEN_OBJ_ID));

    if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
        return ST_DELETE;
    }

    return ST_CONTINUE;
}

static void
rb_mmtk_update_obj_id_tables(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    st_foreach(objspace->obj_to_id_tbl, rb_mmtk_update_obj_id_tables_obj_to_id_i, 0);
    st_foreach(objspace->id_to_obj_tbl, rb_mmtk_update_obj_id_tables_id_to_obj_i, 0);
}

static int
rb_mmtk_global_tables_count(void)
{
    return RB_GC_VM_WEAK_TABLE_COUNT;
}

static void
rb_mmtk_update_global_tables(int table)
{
    RUBY_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);

    rb_gc_vm_weak_table_foreach(rb_mmtk_update_table_i, NULL, NULL, (enum rb_gc_vm_weak_tables)table);
}

// Bootup
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_scan_roots_in_mutator_thread,
    rb_mmtk_scan_object_ruby_style,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_update_obj_id_tables,
};

// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};

MMTk_Builder *
rb_mmtk_builder_init(void)
{
    MMTk_Builder *builder = mmtk_builder_default();
    return builder;
}

void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    mmtk_init_binding(builder, NULL, &ruby_upcalls, (MMTk_ObjectReference)Qundef);

    return calloc(1, sizeof(struct objspace));
}

static void objspace_obj_id_init(struct objspace *objspace);
static void gc_run_finalizers(void *data);

void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace_obj_id_init(objspace);

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
}

void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}

void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    struct MMTk_ractor_cache *cache = malloc(sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);

    return cache;
}

void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    RUBY_ASSERT(objspace->live_ractor_cache_count > 1);
    objspace->live_ractor_cache_count--;

    mmtk_destroy_mutator(cache->mutator);
}

void rb_gc_impl_set_params(void *objspace_ptr) { }

static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }

void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(640));
    // Pretend we have 5 size pools
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}

static size_t heap_sizes[6] = {
    40, 80, 160, 320, 640, 0
};

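/*
 * The five usable sizes are multiples of BASE_SLOT_SIZE (sizeof(VALUE) * 5,
 * i.e. 40 bytes on 64-bit), doubling up to RVARGC_MAX_ALLOCATE_SIZE (640);
 * the trailing 0 is a sentinel for callers that iterate the array.
 * rb_gc_impl_heap_id_for_size rounds a request up to the next size, e.g.
 * 40 -> index 0, 100 -> index 2 (160 bytes), 640 -> index 4.
 */
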
size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
    return heap_sizes;
}

int
rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
{
    struct objspace *objspace = data;

    if (!RB_TYPE_P(obj, T_NONE)) {
        rb_gc_obj_free_vm_weak_references(obj);
        rb_gc_obj_free(objspace, obj);
    }

    return 0;
}

// Shutdown
static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);

void
rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
{
    mmtk_set_gc_enabled(false);
    each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
    mmtk_set_gc_enabled(true);
}

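/*
 * GC stays disabled while the remaining objects are freed above so that
 * teardown cannot trigger a collection over a half-freed heap.
 */
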
// GC
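/*
 * The full_mark/immediate_mark/immediate_sweep/compact arguments are part
 * of the rb_gc_impl API but are not consulted here: MMTk itself decides how
 * to service the collection request.
 */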
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    bool enabled = mmtk_gc_enabled_p();
    if (!enabled) {
        mmtk_set_gc_enabled(true);
    }

    mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache());

    if (!enabled) {
        mmtk_set_gc_enabled(false);
    }
}

bool
rb_gc_impl_during_gc_p(void *objspace_ptr)
{
    // TODO
    return false;
}

static void
rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
{
    rb_gc_prepare_heap_process_object((VALUE)obj);
}

void
rb_gc_impl_prepare_heap(void *objspace_ptr)
{
    mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
}

void
rb_gc_impl_gc_enable(void *objspace_ptr)
{
    mmtk_set_gc_enabled(true);
}

void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}

bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}

void
rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->gc_stress = RTEST(flag);
}

VALUE
rb_gc_impl_stress_get(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_stress ? Qtrue : Qfalse;
}

VALUE
rb_gc_impl_config_get(void *objspace_ptr)
{
    // TODO
    return rb_hash_new();
}

void
rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
{
    // TODO
}

// Object allocation

VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    if (alloc_size > 640) rb_bug("too big");
    // Round the request up to the nearest size pool.
    for (int i = 0; i < 5; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache);
    }

    // Allocate one extra word in front of the object (the `+ 8` assumes
    // 64-bit VALUEs) and store the slot size there; rb_gc_impl_obj_slot_size
    // reads it back through alloc_obj[-1].
    VALUE *alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    alloc_obj++;
    alloc_obj[-1] = alloc_size;
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;
    if (alloc_size > 16) alloc_obj[2] = v1;
    if (alloc_size > 24) alloc_obj[3] = v2;
    if (alloc_size > 32) alloc_obj[4] = v3;

    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size + 8, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_add_obj_free_candidate(alloc_obj);

    objspace->total_allocated_objects++;

    return (VALUE)alloc_obj;
}

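/*
 * Resulting slot layout (a sketch; `obj` is the VALUE returned above):
 *
 *   obj[-1]  slot size in bytes
 *   obj[0]   flags        obj[1]  klass
 *   obj[2..] v1, v2, v3 and the rest of the object payload
 */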
size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return ((VALUE *)obj)[-1];
}

size_t
rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
{
    for (int i = 0; i < 5; i++) {
        if (size == heap_sizes[i]) return i;
        if (size < heap_sizes[i]) return i;
    }

    rb_bug("size too big");
}

bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size <= 640;
}

// Malloc
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system malloc
    return malloc(size);
}

void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size)
{
    // TODO: don't use system calloc
    return calloc(1, size);
}

void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
{
    // TODO: don't use system realloc
    return realloc(ptr, new_size);
}

void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    // TODO: don't use system free
    free(ptr);
}

void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff) { }

// Marking
void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    // Forward the object to the MMTk trace closure of the current GC worker.
    rb_mmtk_gc_thread_tls->object_closure.c_function(rb_mmtk_gc_thread_tls->object_closure.rust_closure,
                                                     rb_mmtk_gc_thread_tls->gc_context,
                                                     (MMTk_ObjectReference)obj,
                                                     false);
}

void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    // TODO: make it movable
    rb_gc_impl_mark(objspace_ptr, *ptr);
}

void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return;

    // TODO: also pin
    rb_gc_impl_mark(objspace_ptr, obj);
}

void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
        rb_gc_impl_mark_and_pin(objspace_ptr, obj);
    }
}

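/*
 * Weak references: rb_gc_impl_mark_weak registers the slot with MMTk so the
 * reference can be cleared if the referent is not otherwise reachable;
 * rb_gc_impl_remove_weak unregisters a slot whose parent dropped the
 * reference before the next GC ran.
 */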
void
rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
{
    mmtk_mark_weak((MMTk_ObjectReference *)ptr);
}

void
rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
{
    mmtk_remove_weak((MMTk_ObjectReference *)ptr);
}

// Compaction
bool
rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
{
    rb_bug("unimplemented");
}

VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE value)
{
    rb_bug("unimplemented");
}

// Write barriers
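/*
 * MMTk's barrier is an object-remembering post barrier: after a reference
 * store into `a`, the modified object is logged so the next collection
 * rescans it. Only the written-to object is needed, which is why `b` goes
 * unused below and why "remember" and "write" reduce to the same call.
 */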
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}

void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
}

void
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
}

// Heap walking
static void
each_objects_i(MMTk_ObjectReference obj, void *d)
{
    rb_darray(VALUE) *objs = d;

    rb_darray_append(objs, (VALUE)obj);
}

static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    // Snapshot the heap into a darray first so `func` can run arbitrary
    // code without being called from inside MMTk's object enumeration.
    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}

struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);
    void *data;
};

static int
rb_gc_impl_each_objects_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_objects_data *data = d;

    size_t slot_size = rb_gc_impl_obj_slot_size(obj);

    return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
}

void
rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
{
    struct rb_gc_impl_each_objects_data each_objects_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
}

struct rb_gc_impl_each_object_data {
    void (*func)(VALUE, void *);
    void *data;
};

static int
rb_gc_impl_each_object_i(VALUE obj, void *d)
{
    struct rb_gc_impl_each_object_data *data = d;

    data->func(obj, data->data);

    return 0;
}

void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
{
    struct rb_gc_impl_each_object_data each_object_data = {
        .func = func,
        .data = data
    };

    each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
}

// Finalizers
static VALUE
gc_run_finalizers_get_final(long i, void *data)
{
    VALUE table = (VALUE)data;

    return RARRAY_AREF(table, i);
}

static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            VALUE object_id = job->as.finalize.object_id;
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                job->as.finalize.object_id,
                RARRAY_LEN(finalizer_array),
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            RB_GC_GUARD(object_id);
            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}

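/*
 * Zombie jobs are pushed onto objspace->finalizer_jobs with a CAS retry
 * loop (a Treiber-stack style lock-free push), since dfree jobs can be
 * queued from GC worker threads concurrently with mutators.
 */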
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}

VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    return block;
}

void
rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;

    st_data_t data = obj;
    st_delete(objspace->finalizer_table, &data, 0);
    FL_UNSET(obj, FL_FINALIZE);
}

void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = (VALUE)data;
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
}

static int
move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct objspace *objspace = (struct objspace *)arg;

    make_final_job(objspace, (VALUE)key, (VALUE)val);

    return ST_DELETE;
}

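/*
 * Finalizers can register new finalizers while they run, so the table is
 * drained in a loop until it is empty before the remaining obj_free
 * candidates are swept.
 */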
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
    for (size_t i = 0; i < registered_candidates.len; i++) {
        VALUE obj = (VALUE)registered_candidates.ptr[i];

        if (rb_gc_shutdown_call_finalizer_p(obj)) {
            rb_gc_obj_free(objspace_ptr, obj);
            RBASIC(obj)->flags = 0;
        }
    }
    mmtk_free_raw_vec_of_obj_ref(registered_candidates);

    gc_run_finalizers(objspace);
}

// Object ID
static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_TYPE_P(x, T_BIGNUM)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    return FIX2LONG(rb_hash((VALUE)n));
}

#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
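/*
 * IDs are handed out monotonically in steps of RUBY_IMMEDIATE_MASK + 1,
 * mirroring the default GC's numbering scheme, so the low tag bits of a
 * heap object's ID are always zero.
 */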

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

static void
objspace_obj_id_init(struct objspace *objspace)
{
    objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
    objspace->obj_to_id_tbl = st_init_numtable();
    objspace->next_object_id = OBJ_ID_INITIAL;
}

VALUE
rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;

    unsigned int lev = rb_gc_vm_lock();

    VALUE id;
    if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
        RUBY_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
    }
    else {
        RUBY_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));

        id = ULL2NUM(objspace->next_object_id);
        objspace->next_object_id += OBJ_ID_INCREMENT;

        st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
        st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
        FL_SET(obj, FL_SEEN_OBJ_ID);
    }

    rb_gc_vm_unlock(lev);

    return id;
}

VALUE
rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
{
    struct objspace *objspace = objspace_ptr;

    VALUE obj;
    if (st_lookup(objspace->id_to_obj_tbl, object_id, &obj) &&
        !rb_gc_impl_garbage_object_p(objspace, obj)) {
        return obj;
    }

    if (RTEST(rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(objspace->next_object_id)))) {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
    }
}

// Forking

void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    mmtk_before_fork();
}

void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
}

// Statistics

void
rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = RTEST(flag);
}

bool
rb_gc_impl_get_measure_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->measure_gc_time;
}

unsigned long long
rb_gc_impl_get_total_time(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->total_gc_time;
}

size_t
rb_gc_impl_gc_count(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    return objspace->gc_count;
}

VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}

enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];

static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
#undef S
    }
}

VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    // total_gc_time is in nanoseconds; :time is reported in milliseconds.
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}

VALUE
rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
{
    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        return hash_or_sym;
    }
    else {
        return Qundef;
    }
}

// Miscellaneous
size_t
rb_gc_impl_obj_flags(void *objspace_ptr, VALUE obj, ID* flags, size_t max)
{
    return 0;
}

bool
rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
{
    if (ptr == NULL) return false;
    if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
    return mmtk_is_mmtk_object((MMTk_Address)ptr);
}

bool
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
{
    return false;
}

void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }

void
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
{
    if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
    }

    rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}

// GC Identification

const char *
rb_gc_impl_active_gc_name(void)
{
    return "mmtk";
}