Ruby 3.5.0dev (2025-10-25 revision c6d1458421796786d26e084b48a0a4a7e3b40867)
gc.c (c6d1458421796786d26e084b48a0a4a7e3b40867)
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/concurrent_set.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"
#include "zjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave(&lev, file, line);
}

unsigned int
rb_gc_cr_lock(const char *file, int line)
{
    unsigned int lev;
    rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
}

unsigned int
rb_gc_vm_lock_no_barrier(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter_nb(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_nb(&lev, file, line);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}
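
/* Usage sketch (illustrative, not taken verbatim from this file): callers
 * pair each lock with the matching unlock and thread the returned level
 * through, usually via the RB_GC_VM_LOCK()/RB_GC_VM_UNLOCK() convenience
 * macros used later in this file:
 *
 *     unsigned int lev = rb_gc_vm_lock(__FILE__, __LINE__);
 *     // ... mutate VM-global GC state ...
 *     rb_gc_vm_unlock(lev, __FILE__, __LINE__);
 */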

#if USE_MODULAR_GC
void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}
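
/* Usage sketch (illustrative): rb_gc_event_hook() is the GC-side trigger for
 * C-level tracepoints on internal events, e.g. one registered through the
 * public debug API:
 *
 *     VALUE tp = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, cb, NULL);
 *     rb_tracepoint_enable(tp);
 *
 * Here `cb` is a hypothetical callback matching rb_tracepoint_new()'s
 * signature; rb_gc_event_hook_required_p() above keeps the fast path cheap
 * when no such hook is installed. */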

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
            (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
             ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    rb_ractor_ignore_belonging(true);
    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
    rb_ractor_ignore_belonging(false);
#undef RESTORE_FINALIZER
}

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_ractor_p(obj)) return false;
        if (rb_obj_is_fstring_table(obj)) return false;
        if (rb_obj_is_symbol_table(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_obj_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    RBASIC_SET_SHAPE_ID(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    return (uint32_t)rb_shape_transition_heap(obj, heap_id);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // Without malloc_usable_size() we can't probe the allocator, so stick
    // with plain powers of 2.
    return 0;
}
#endif

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
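
/* Worked example (illustrative numbers, assuming malloc_offset == 8 as
 * probed on glibc): rb_malloc_grow_capa(10, 8) starts from 10 * 8 = 80
 * bytes, doubles to 160, rounds up to the next power of 2 (256), subtracts
 * the 8-byte allocator metadata offset (248) and converts back to elements:
 * 248 / 8 == 31. The resulting request of 31 * 8 = 248 bytes fills a
 * 256-byte malloc chunk exactly, wasting nothing. */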

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}
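
/* Usage sketch (illustrative): extension code uses the exported wrapper to
 * guard allocation-size arithmetic, e.g.
 *
 *     size_t bytes = rb_size_mul_or_raise(len, sizeof(VALUE), rb_eArgError);
 *
 * which returns len * sizeof(VALUE), or raises ArgumentError on overflow
 * instead of silently wrapping around. */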

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

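/* Usage note (an illustrative sketch, not from the original source): setting
 * RUBY_GC_LIBRARY selects the GC implementation at boot time, e.g.
 *
 *     RUBY_GC_LIBRARY=mmtk ruby script.rb
 *
 * makes ruby_modular_gc_init() below dlopen() "librubygc.mmtk.<DLEXT>" from
 * MODULAR_GC_DIR, while leaving the variable unset keeps the statically
 * linked default GC. */
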
static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
                (prefix_len >= (tail = rb_strlen_lit(lit)) && \
                 memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_free);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(object_metadata);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;
    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(VALUE obj)
{
#if RUBY_DEBUG
    // IMEMOs and objects without a class (e.g. the managed ID table) are not traceable
    if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;

    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        const VALUE *iseq_encoded = ISEQ_BODY(cfp->iseq)->iseq_encoded;
        const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size;
        RUBY_ASSERT(cfp->pc >= iseq_encoded, "PC not set when allocating, breaking tracing");
        RUBY_ASSERT(cfp->pc <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);

    gc_validate_pc(obj);

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc
             * which could potentially trigger a GC, and a lot of code is unsafe
             * to trigger a GC right after an object has been allocated because
             * they perform initialization for the object and assume that the
             * GC does not trigger before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    }

#if RGENGC_CHECK_MODE
# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
#  define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
# endif

    memset(
        (void *)(obj + sizeof(struct RBasic)),
        GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
        rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
    );
#endif

    return obj;
}

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    if (klass) rb_data_object_check(klass);
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA, !dmark, sizeof(struct RTypedData));

    struct RData *data = (struct RData *)obj;
    data->dmark = dmark;
    data->dfree = dfree;
    data->data = datap;

    return obj;
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, wb_protected, size);

    struct RTypedData *data = (struct RTypedData *)obj;
    data->fields_obj = 0;
    *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
    data->data = datap;

    return obj;
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
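
/* Usage sketch (illustrative; `point_type` and `struct point` are
 * hypothetical): an extension opting into embedding declares its type with
 * RUBY_TYPED_EMBEDDABLE, which requires RUBY_TYPED_FREE_IMMEDIATELY:
 *
 *     static const rb_data_type_t point_type = {
 *         .wrap_struct_name = "Point",
 *         .function = { .dfree = RUBY_DEFAULT_FREE },
 *         .flags = RUBY_TYPED_EMBEDDABLE | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct point), &point_type);
 *
 * When the embedded size fits in an allocatable slot the payload lives right
 * after the RTypedData header; otherwise the code above falls back to a
 * separately xcalloc'd buffer. */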

static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static void
io_fptr_finalize(void *fptr)
{
    rb_io_fptr_finalize((struct rb_io *)fptr);
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

struct classext_foreach_args {
    VALUE klass;
    rb_objspace_t *objspace; // used for update_*
};

static void
classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_class_classext_free(args->klass, ext, is_prime);
}

static void
classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_iclass_classext_free(args->klass, ext, is_prime);
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    struct classext_foreach_args args;

    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
                st_free_table(ROBJECT_FIELDS_HASH(obj));
            }
            else {
                xfree(ROBJECT(obj)->as.heap.fields);
                RB_DEBUG_COUNTER_INC(obj_obj_ptr);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        break;
      case T_MODULE:
      case T_CLASS:
#if USE_ZJIT
        rb_zjit_klass_free(obj);
#endif
        args.klass = obj;
        rb_class_classext_foreach(obj, classext_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }
        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        args.klass = obj;

        rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST_RAW(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (obj == rb_mRubyVMFrozenCore)
                return 1;

            if (!RBASIC_CLASS(obj)) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      a = 102.7
 *      b = 95 # Won't be returned
 *      c = 12345678987654321
 *      count = ObjectSpace.each_object(Numeric) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      12345678987654321
 *      102.7
 *      2.71828182845905
 *      3.14159265358979
 *      2.22044604925031e-16
 *      1.7976931348623157e+308
 *      2.2250738585072e-308
 *      Total count: 7
 *
 *  Due to a current known Ractor implementation issue, this method will not yield
 *  Ractor-unshareable objects in multi-Ractor mode (when
 *  <code>Ractor.new</code> has been called within the process at least once).
 *  See https://bugs.ruby-lang.org/issues/19387 for more information.
 *
 *      a = 12345678987654321 # shareable
 *      b = [].freeze # shareable
 *      c = {} # not shareable
 *      ObjectSpace.each_object {|x| x } # yields a, b, and c
 *      Ractor.new {} # enter multi-Ractor mode
 *      ObjectSpace.each_object {|x| x } # does not yield c
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  has been destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
static VALUE id2ref_value = 0;
static st_table *id2ref_tbl = NULL;

#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
static size_t object_id_counter = 1;
#else
static unsigned long long object_id_counter = 1;
#endif

static inline VALUE
generate_next_object_id(void)
{
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
    // 64bit atomics are available
    return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
#else
    unsigned int lock_lev = RB_GC_VM_LOCK();
    VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
    RB_GC_VM_UNLOCK(lock_lev);
    return id;
#endif
}
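
/* Worked example (illustrative; assumes RUBY_IMMEDIATE_MASK == 0x07 as on
 * typical 64-bit flonum builds): OBJ_ID_INCREMENT is then 8, so successive
 * ids are 8, 16, 24, ... Keeping the low tag bits clear guarantees a heap
 * object's id can never collide with an immediate VALUE such as a Fixnum or
 * a static Symbol, whose ids are derived from their tagged representation. */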

void
rb_gc_obj_id_moved(VALUE obj)
{
    if (UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
    }
}

static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_TYPE_P(x, T_BIGNUM)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    return FIX2LONG(rb_hash((VALUE)n));
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

static void gc_mark_tbl_no_pin(st_table *table);

static void
id2ref_tbl_mark(void *data)
{
    st_table *table = (st_table *)data;
    if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
        // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
        rb_mark_set(table);
    }
    // We purposely don't mark values, as they are weak references.
    // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
}

static size_t
id2ref_tbl_memsize(const void *data)
{
    return rb_st_memsize(data);
}

static void
id2ref_tbl_free(void *data)
{
    id2ref_tbl = NULL; // clear global ref
    st_table *table = (st_table *)data;
    st_free_table(table);
}

static const rb_data_type_t id2ref_tbl_type = {
    .wrap_struct_name = "VM/_id2ref_table",
    .function = {
        .dmark = id2ref_tbl_mark,
        .dfree = id2ref_tbl_free,
        .dsize = id2ref_tbl_memsize,
        // dcompact function not required because the table is reference updated
        // in rb_gc_vm_weak_table_foreach
    },
    .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
class_object_id(VALUE klass)
{
    VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
    if (!id) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        id = generate_next_object_id();
        VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
        if (existing_id) {
            id = existing_id;
        }
        else if (RB_UNLIKELY(id2ref_tbl)) {
            st_insert(id2ref_tbl, id, klass);
        }
        RB_GC_VM_UNLOCK(lock_lev);
    }
    return id;
}

static inline VALUE
object_id_get(VALUE obj, shape_id_t shape_id)
{
    VALUE id;
    if (rb_shape_too_complex_p(shape_id)) {
        id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
    }
    else {
        id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
    }

#if RUBY_DEBUG
    if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
        rb_p(obj);
        rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
    }
#endif

    return id;
}

static VALUE
object_id0(VALUE obj)
{
    VALUE id = Qfalse;
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    if (rb_shape_has_object_id(shape_id)) {
        return object_id_get(obj, shape_id);
    }

    shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);

    id = generate_next_object_id();
    rb_obj_field_set(obj, object_id_shape_id, 0, id);

    RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
    RUBY_ASSERT(rb_shape_obj_has_id(obj));

    if (RB_UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
    }
    return id;
}

static VALUE
object_id(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        // With namespaces, classes and modules have different fields
        // in different namespaces, so we cannot store the object id
        // in fields.
        return class_object_id(obj);
      case T_IMEMO:
        RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
        break;
      default:
        break;
    }

    if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        VALUE id = object_id0(obj);
        RB_GC_VM_UNLOCK(lock_lev);
        return id;
    }

    return object_id0(obj);
}
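
/* Behavioral note (an illustrative sketch): from Ruby,
 *
 *     obj = Object.new
 *     id  = obj.object_id
 *     ObjectSpace._id2ref(id) # => obj
 *
 * The id is lazily assigned on first use via the shape transition above and
 * stays stable even if compaction later moves the object. */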

static void
build_id2ref_i(VALUE obj, void *data)
{
    st_table *id2ref_tbl = (st_table *)data;

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (RCLASS(obj)->object_id) {
            st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
        }
        break;
      case T_IMEMO:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (IMEMO_TYPE_P(obj, imemo_fields) && rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
        }
        break;
      case T_OBJECT:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), obj);
        }
        break;
      default:
        // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
        break;
    }
}
1968
1969static VALUE
1970object_id_to_ref(void *objspace_ptr, VALUE object_id)
1971{
1972 rb_objspace_t *objspace = objspace_ptr;
1973
1974 unsigned int lev = RB_GC_VM_LOCK();
1975
1976 if (!id2ref_tbl) {
1977 rb_gc_vm_barrier(); // stop other ractors
1978
1979 // GC must not trigger while we build the table; otherwise, if we end
1980 // up freeing an object that had an ID, we might try to delete it from
1981 // the table even though it wasn't inserted yet.
1982 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
1983 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
1984
1985 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
1986 // objects we just added to the table.
1987 // Calling rb_gc_disable() also saves us from having to handle potentially garbage objects.
1988 bool gc_disabled = RTEST(rb_gc_disable());
1989 {
1990 id2ref_tbl = tmp_id2ref_tbl;
1991 id2ref_value = tmp_id2ref_value;
1992
1993 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
1994 }
1995 if (!gc_disabled) rb_gc_enable();
1996 }
1997
1998 VALUE obj;
1999 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2000
2001 RB_GC_VM_UNLOCK(lev);
2002
2003 if (found) {
2004 return obj;
2005 }
2006
2007 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2008 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2009 }
2010 else {
2011 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2012 }
2013}
2014
2015static inline void
2016obj_free_object_id(VALUE obj)
2017{
2018 VALUE obj_id = 0;
2019 if (RB_UNLIKELY(id2ref_tbl)) {
2020 switch (BUILTIN_TYPE(obj)) {
2021 case T_CLASS:
2022 case T_MODULE:
2023 obj_id = RCLASS(obj)->object_id;
2024 break;
2025 case T_IMEMO:
2026 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2027 return;
2028 }
2029 // fallthrough
2030 case T_OBJECT:
2031 {
2032 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2033 if (rb_shape_has_object_id(shape_id)) {
2034 obj_id = object_id_get(obj, shape_id);
2035 }
2036 break;
2037 }
2038 default:
2039 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2040 return;
2041 }
2042
2043 if (RB_UNLIKELY(obj_id)) {
2044 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2045
2046 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2047 // If the object is a T_IMEMO/fields, it's possible the actual object
2048 // has been garbage collected already.
2049 if (!RB_TYPE_P(obj, T_IMEMO)) {
2050 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2051 }
2052 }
2053 }
2054 }
2055}
2056
2057void
2058rb_gc_obj_free_vm_weak_references(VALUE obj)
2059{
2060 obj_free_object_id(obj);
2061
2062 if (rb_obj_exivar_p(obj)) {
2063 rb_free_generic_ivar(obj);
2064 }
2065
2066 switch (BUILTIN_TYPE(obj)) {
2067 case T_STRING:
2068 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2069 rb_gc_free_fstring(obj);
2070 }
2071 break;
2072 case T_SYMBOL:
2073 rb_gc_free_dsymbol(obj);
2074 break;
2075 case T_IMEMO:
2076 switch (imemo_type(obj)) {
2077 case imemo_callcache: {
2078 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
2079
2080 if (vm_cc_refinement_p(cc)) {
2081 rb_vm_delete_cc_refinement(cc);
2082 }
2083
2084 break;
2085 }
2086 case imemo_callinfo:
2087 rb_vm_ci_free((const struct rb_callinfo *)obj);
2088 break;
2089 case imemo_ment:
2090 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2091 break;
2092 default:
2093 break;
2094 }
2095 break;
2096 default:
2097 break;
2098 }
2099}
2100
2101/*
2102 * call-seq:
2103 * ObjectSpace._id2ref(object_id) -> an_object
2104 *
2105 * Converts an object id to a reference to the object. May not be
2106 * called on an object id passed as a parameter to a finalizer.
2107 *
2108 * s = "I am a string" #=> "I am a string"
2109 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2110 * r == s #=> true
2111 *
2112 * In multi-ractor mode, if the object is not shareable, it raises
2113 * RangeError.
2114 *
2115 * This method is deprecated and should no longer be used.
2116 */
2117
2118static VALUE
2119id2ref(VALUE objid)
2120{
2121#if SIZEOF_LONG == SIZEOF_VOIDP
2122#define NUM2PTR(x) NUM2ULONG(x)
2123#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2124#define NUM2PTR(x) NUM2ULL(x)
2125#endif
2126 objid = rb_to_int(objid);
2127 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2128 VALUE ptr = NUM2PTR(objid);
2129 if (SPECIAL_CONST_P(ptr)) {
2130 if (ptr == Qtrue) return Qtrue;
2131 if (ptr == Qfalse) return Qfalse;
2132 if (NIL_P(ptr)) return Qnil;
2133 if (FIXNUM_P(ptr)) return ptr;
2134 if (FLONUM_P(ptr)) return ptr;
2135
2136 if (SYMBOL_P(ptr)) {
2137 // Check that the symbol is valid
2138 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2139 return ptr;
2140 }
2141 else {
2142 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2143 }
2144 }
2145
2146 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2147 }
2148 }
2149
2150 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2151 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2152 return obj;
2153 }
2154 else {
2155 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2156 }
2157}
2158
2159/* :nodoc: */
2160static VALUE
2161os_id2ref(VALUE os, VALUE objid)
2162{
2163 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2164 return id2ref(objid);
2165}
2166
2167static VALUE
2168rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2169{
2170 if (SPECIAL_CONST_P(obj)) {
2171#if SIZEOF_LONG == SIZEOF_VOIDP
2172 return LONG2NUM((SIGNED_VALUE)obj);
2173#else
2174 return LL2NUM((SIGNED_VALUE)obj);
2175#endif
2176 }
2177
2178 return get_heap_object_id(obj);
2179}
2180
2181static VALUE
2182nonspecial_obj_id(VALUE obj)
2183{
2184#if SIZEOF_LONG == SIZEOF_VOIDP
2185 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2186#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2187 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2188#else
2189# error not supported
2190#endif
2191}
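/* Editorial note (worked example): a Fixnum with value n is encoded as the
 * VALUE 2n+1, so tagging an (even) heap address with FIXNUM_FLAG yields the
 * Ruby Integer address/2 -- e.g. an object at 0x7f80 has memory id 0x3fc0.
 * The SIZEOF_LONG_LONG branch computes the same value explicitly. */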
2192
2193VALUE
2194rb_memory_id(VALUE obj)
2195{
2196 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2197}
2198
2199/*
2200 * Document-method: __id__
2201 * Document-method: object_id
2202 *
2203 * call-seq:
2204 * obj.__id__ -> integer
2205 * obj.object_id -> integer
2206 *
2207 * Returns an integer identifier for +obj+.
2208 *
2209 * The same number will be returned on all calls to +object_id+ for a given
2210 * object, and no two active objects will share an id.
2211 *
2212 * Note that some objects of builtin classes are reused for optimization.
2213 * This is the case for immediate values and frozen string literals.
2214 *
2215 * BasicObject implements +__id__+, Kernel implements +object_id+.
2216 *
2217 * Immediate values are not passed by reference but are passed by value:
2218 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2219 *
2220 * Object.new.object_id == Object.new.object_id # => false
2221 * (21 * 2).object_id == (21 * 2).object_id # => true
2222 * "hello".object_id == "hello".object_id # => false
2223 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2224 */
2225
2226VALUE
2227rb_obj_id(VALUE obj)
2228{
2229 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2230 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2231 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2232 * any immediates. */
2233 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2234}
2235
2236bool
2237rb_obj_id_p(VALUE obj)
2238{
2239 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2240}
2241
2242/*
2243 * GC implementations should call this function before the GC phase that updates references
2244 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2245 * "W^X" policy and protect the code memory from being modified during execution. This function
2246 * makes the code memory writeable.
2247 */
2248void
2249rb_gc_before_updating_jit_code(void)
2250{
2251#if USE_YJIT
2252 rb_yjit_mark_all_writeable();
2253#endif
2254}
2255
2256/*
2257 * GC implementations should call this function before the GC phase that updates references
2258 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2259 * executable again.
2260 */
2261void
2262rb_gc_after_updating_jit_code(void)
2263{
2264#if USE_YJIT
2265 rb_yjit_mark_all_executable();
2266#endif
2267}
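/* Sketch of the expected call sequence in a GC implementation (the middle
 * step is illustrative pseudocode, not a real API):
 *
 *     rb_gc_before_updating_jit_code();  // JIT code pages become writeable
 *     // ... rewrite moved VALUEs embedded in generated machine code ...
 *     rb_gc_after_updating_jit_code();   // W^X restored: executable again
 */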
2268
2269static void
2270classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2271{
2272 size_t *size = (size_t *)arg;
2273 size_t s = 0;
2274
2275 if (RCLASSEXT_M_TBL(ext)) {
2276 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2277 }
2278 if (RCLASSEXT_CVC_TBL(ext)) {
2279 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2280 }
2281 if (RCLASSEXT_CONST_TBL(ext)) {
2282 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2283 }
2284 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2285 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2286 }
2287 if (!prime) {
2288 s += sizeof(rb_classext_t);
2289 }
2290 *size += s;
2291}
2292
2293static void
2294classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2295{
2296 size_t *size = (size_t *)arg;
2297 size_t array_size;
2298 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2299 RUBY_ASSERT(prime);
2300 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2301 *size += array_size * sizeof(VALUE);
2302 }
2303}
2304
2305size_t
2306rb_obj_memsize_of(VALUE obj)
2307{
2308 size_t size = 0;
2309
2310 if (SPECIAL_CONST_P(obj)) {
2311 return 0;
2312 }
2313
2314 switch (BUILTIN_TYPE(obj)) {
2315 case T_OBJECT:
2316 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2317 if (rb_shape_obj_too_complex_p(obj)) {
2318 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2319 }
2320 else {
2321 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2322 }
2323 }
2324 break;
2325 case T_MODULE:
2326 case T_CLASS:
2327 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2328 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2329 break;
2330 case T_ICLASS:
2331 if (RICLASS_OWNS_M_TBL_P(obj)) {
2332 if (RCLASS_M_TBL(obj)) {
2333 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2334 }
2335 }
2336 break;
2337 case T_STRING:
2338 size += rb_str_memsize(obj);
2339 break;
2340 case T_ARRAY:
2341 size += rb_ary_memsize(obj);
2342 break;
2343 case T_HASH:
2344 if (RHASH_ST_TABLE_P(obj)) {
2345 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2346 /* st_table is in the slot */
2347 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2348 }
2349 break;
2350 case T_REGEXP:
2351 if (RREGEXP_PTR(obj)) {
2352 size += onig_memsize(RREGEXP_PTR(obj));
2353 }
2354 break;
2355 case T_DATA:
2356 size += rb_objspace_data_type_memsize(obj);
2357 break;
2358 case T_MATCH:
2359 {
2360 rb_matchext_t *rm = RMATCH_EXT(obj);
2361 size += onig_region_memsize(&rm->regs);
2362 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2363 }
2364 break;
2365 case T_FILE:
2366 if (RFILE(obj)->fptr) {
2367 size += rb_io_memsize(RFILE(obj)->fptr);
2368 }
2369 break;
2370 case T_RATIONAL:
2371 case T_COMPLEX:
2372 break;
2373 case T_IMEMO:
2374 size += rb_imemo_memsize(obj);
2375 break;
2376
2377 case T_FLOAT:
2378 case T_SYMBOL:
2379 break;
2380
2381 case T_BIGNUM:
2382 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2383 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2384 }
2385 break;
2386
2387 case T_NODE:
2388 UNEXPECTED_NODE(obj_memsize_of);
2389 break;
2390
2391 case T_STRUCT:
2392 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2393 RSTRUCT(obj)->as.heap.ptr) {
2394 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2395 }
2396 break;
2397
2398 case T_ZOMBIE:
2399 case T_MOVED:
2400 break;
2401
2402 default:
2403 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2404 BUILTIN_TYPE(obj), (void*)obj);
2405 }
2406
2407 return size + rb_gc_obj_slot_size(obj);
2408}
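/* This function backs ObjectSpace.memsize_of in the objspace extension:
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of("x" * 1000) # => slot size + malloc'ed payload
 *
 * The result is only an estimate, since the switch above sums just the
 * auxiliary allocations it knows about.
 */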
2409
2410static int
2411set_zero(st_data_t key, st_data_t val, st_data_t arg)
2412{
2413 VALUE k = (VALUE)key;
2414 VALUE hash = (VALUE)arg;
2415 rb_hash_aset(hash, k, INT2FIX(0));
2416 return ST_CONTINUE;
2417}
2418
2419struct count_objects_data {
2420 size_t counts[T_MASK+1];
2421 size_t freed;
2422 size_t total;
2423};
2424
2425static void
2426count_objects_i(VALUE obj, void *d)
2427{
2428 struct count_objects_data *data = (struct count_objects_data *)d;
2429
2430 if (RBASIC(obj)->flags) {
2431 data->counts[BUILTIN_TYPE(obj)]++;
2432 }
2433 else {
2434 data->freed++;
2435 }
2436
2437 data->total++;
2438}
2439
2440/*
2441 * call-seq:
2442 * ObjectSpace.count_objects([result_hash]) -> hash
2443 *
2444 * Counts all objects grouped by type.
2445 *
2446 * It returns a hash, such as:
2447 * {
2448 * :TOTAL=>10000,
2449 * :FREE=>3011,
2450 * :T_OBJECT=>6,
2451 * :T_CLASS=>404,
2452 * # ...
2453 * }
2454 *
2455 * The contents of the returned hash are implementation specific.
2456 * They may change in the future.
2457 *
2458 * The keys starting with +:T_+ mean live objects.
2459 * For example, +:T_ARRAY+ is the number of arrays.
2460 * +:FREE+ is the number of object slots which are not currently in use.
2461 * +:TOTAL+ is the sum of the above.
2462 *
2463 * If the optional argument +result_hash+ is given,
2464 * it is overwritten and returned. This is intended to avoid the probe effect.
2465 *
2466 * h = {}
2467 * ObjectSpace.count_objects(h)
2468 * puts h
2469 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2470 *
2471 * This method is only expected to work on C Ruby.
2472 *
2473 */
2474
2475static VALUE
2476count_objects(int argc, VALUE *argv, VALUE os)
2477{
2478 struct count_objects_data data = { 0 };
2479 VALUE hash = Qnil;
2480 VALUE types[T_MASK + 1];
2481
2482 if (rb_check_arity(argc, 0, 1) == 1) {
2483 hash = argv[0];
2484 if (!RB_TYPE_P(hash, T_HASH))
2485 rb_raise(rb_eTypeError, "non-hash given");
2486 }
2487
2488 for (size_t i = 0; i <= T_MASK; i++) {
2489 // type_sym can allocate an object,
2490 // so we need to create all key symbols in advance
2491 // so as not to disturb the result
2492 types[i] = type_sym(i);
2493 }
2494
2495 // Same as type_sym, we need to create all key symbols in advance
2496 VALUE total = ID2SYM(rb_intern("TOTAL"));
2497 VALUE free = ID2SYM(rb_intern("FREE"));
2498
2499 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2500
2501 if (NIL_P(hash)) {
2502 hash = rb_hash_new();
2503 }
2504 else if (!RHASH_EMPTY_P(hash)) {
2505 rb_hash_stlike_foreach(hash, set_zero, hash);
2506 }
2507 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2508 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2509
2510 for (size_t i = 0; i <= T_MASK; i++) {
2511 if (data.counts[i]) {
2512 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2513 }
2514 }
2515
2516 return hash;
2517}
2518
2519#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2520
2521#define STACK_START (ec->machine.stack_start)
2522#define STACK_END (ec->machine.stack_end)
2523#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2524
2525#if STACK_GROW_DIRECTION < 0
2526# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2527#elif STACK_GROW_DIRECTION > 0
2528# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2529#else
2530# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2531 : (size_t)(STACK_END - STACK_START + 1))
2532#endif
2533#if !STACK_GROW_DIRECTION
2534int ruby_stack_grow_direction;
2535int
2536ruby_get_stack_grow_direction(volatile VALUE *addr)
2537{
2538 VALUE *end;
2539 SET_MACHINE_STACK_END(&end);
2540
2541 if (end > addr) return ruby_stack_grow_direction = 1;
2542 return ruby_stack_grow_direction = -1;
2543}
2544#endif
2545
2546size_t
2547ruby_stack_length(VALUE **p)
2548{
2549 rb_execution_context_t *ec = GET_EC();
2550 SET_STACK_END;
2551 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2552 return STACK_LENGTH;
2553}
2554
2555#define PREVENT_STACK_OVERFLOW 1
2556#ifndef PREVENT_STACK_OVERFLOW
2557#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2558# define PREVENT_STACK_OVERFLOW 1
2559#else
2560# define PREVENT_STACK_OVERFLOW 0
2561#endif
2562#endif
2563#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2564static int
2565stack_check(rb_execution_context_t *ec, int water_mark)
2566{
2567 SET_STACK_END;
2568
2569 size_t length = STACK_LENGTH;
2570 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2571
2572 return length > maximum_length;
2573}
2574#else
2575#define stack_check(ec, water_mark) FALSE
2576#endif
2577
2578#define STACKFRAME_FOR_CALL_CFUNC 2048
2579
2580int
2581rb_ec_stack_check(rb_execution_context_t *ec)
2582{
2583 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2584}
2585
2586int
2587ruby_stack_check(void)
2588{
2589 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2590}
2591
2592/* ==================== Marking ==================== */
2593
2594#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2595 if (!RB_SPECIAL_CONST_P(obj)) { \
2596 rb_vm_t *vm = GET_VM(); \
2597 void *objspace = vm->gc.objspace; \
2598 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2599 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2600 (func)(objspace, (obj_or_ptr)); \
2601 } \
2602 else if (check_obj ? \
2603 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2604 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2605 true) { \
2606 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2607 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2608 vm->gc.mark_func_data = NULL; \
2609 mark_func_data->mark_func((obj), mark_func_data->data); \
2610 vm->gc.mark_func_data = mark_func_data; \
2611 } \
2612 } \
2613} while (0)
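/* When vm->gc.mark_func_data is set, calls that would mark an object are
 * diverted to a user-supplied callback instead of the GC. This is how APIs
 * such as ObjectSpace.reachable_objects_from (in the objspace extension)
 * enumerate an object's references without running an actual GC cycle. */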
2614
2615static inline void
2616gc_mark_internal(VALUE obj)
2617{
2618 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2619}
2620
2621void
2622rb_gc_mark_movable(VALUE obj)
2623{
2624 gc_mark_internal(obj);
2625}
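/* A minimal sketch of a TypedData dmark function using the movable variant
 * (the struct and names are hypothetical):
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         struct foo *p = ptr;
 *         rb_gc_mark_movable(p->obj); // pair with rb_gc_location() in dcompact
 *     }
 */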
2626
2627void
2628rb_gc_mark_and_move(VALUE *ptr)
2629{
2630 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2631}
2632
2633static inline void
2634gc_mark_and_pin_internal(VALUE obj)
2635{
2636 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2637}
2638
2639void
2640rb_gc_mark(VALUE obj)
2641{
2642 gc_mark_and_pin_internal(obj);
2643}
2644
2645static inline void
2646gc_mark_maybe_internal(VALUE obj)
2647{
2648 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2649}
2650
2651void
2652rb_gc_mark_maybe(VALUE obj)
2653{
2654 gc_mark_maybe_internal(obj);
2655}
2656
2657void
2658rb_gc_mark_weak(VALUE *ptr)
2659{
2660 if (RB_SPECIAL_CONST_P(*ptr)) return;
2661
2662 rb_vm_t *vm = GET_VM();
2663 void *objspace = vm->gc.objspace;
2664 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2665 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2666
2667 rb_gc_impl_mark_weak(objspace, ptr);
2668 }
2669 else {
2670 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2671 }
2672}
2673
2674void
2675rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2676{
2677 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2678}
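/* A sketch of weak marking from a mark function (hypothetical struct): the
 * slot stays alive only if the referent survives; otherwise the GC clears
 * it to Qundef after marking. If the slot itself is freed during GC,
 * rb_gc_remove_weak() must be called so the GC does not later write
 * through a dangling pointer.
 *
 *     static void
 *     cache_mark(void *ptr)
 *     {
 *         struct cache *c = ptr;
 *         rb_gc_mark_weak(&c->entry); // entry may become Qundef after GC
 *     }
 */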
2679
2680ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2681static void
2682each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2683{
2684 VALUE v;
2685 while (n--) {
2686 v = *x;
2687 cb(v, data);
2688 x++;
2689 }
2690}
2691
2692static void
2693each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2694{
2695 if (end <= start) return;
2696 each_location(start, end - start, cb, data);
2697}
2698
2699static void
2700gc_mark_maybe_each_location(VALUE obj, void *data)
2701{
2702 gc_mark_maybe_internal(obj);
2703}
2704
2705void
2706rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2707{
2708 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2709}
2710
2711void
2712rb_gc_mark_values(long n, const VALUE *values)
2713{
2714 for (long i = 0; i < n; i++) {
2715 gc_mark_internal(values[i]);
2716 }
2717}
2718
2719void
2720rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2721{
2722 for (long i = 0; i < n; i++) {
2723 gc_mark_and_pin_internal(values[i]);
2724 }
2725}
2726
2727static int
2728mark_key(st_data_t key, st_data_t value, st_data_t data)
2729{
2730 gc_mark_and_pin_internal((VALUE)key);
2731
2732 return ST_CONTINUE;
2733}
2734
2735void
2736rb_mark_set(st_table *tbl)
2737{
2738 if (!tbl) return;
2739
2740 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2741}
2742
2743static int
2744mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2745{
2746 gc_mark_internal((VALUE)key);
2747 gc_mark_internal((VALUE)value);
2748
2749 return ST_CONTINUE;
2750}
2751
2752static int
2753pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2754{
2755 gc_mark_and_pin_internal((VALUE)key);
2756 gc_mark_and_pin_internal((VALUE)value);
2757
2758 return ST_CONTINUE;
2759}
2760
2761static int
2762pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2763{
2764 gc_mark_and_pin_internal((VALUE)key);
2765 gc_mark_internal((VALUE)value);
2766
2767 return ST_CONTINUE;
2768}
2769
2770static void
2771mark_hash(VALUE hash)
2772{
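    /* compare_by_identity hashes key on object address, so keys must be
     * pinned (moving them would invalidate the hash); values may still be
     * moved by compaction. */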
2773 if (rb_hash_compare_by_id_p(hash)) {
2774 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2775 }
2776 else {
2777 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2778 }
2779
2780 gc_mark_internal(RHASH(hash)->ifnone);
2781}
2782
2783void
2784rb_mark_hash(st_table *tbl)
2785{
2786 if (!tbl) return;
2787
2788 st_foreach(tbl, pin_key_pin_value, 0);
2789}
2790
2791static enum rb_id_table_iterator_result
2792mark_method_entry_i(VALUE me, void *objspace)
2793{
2794 gc_mark_internal(me);
2795
2796 return ID_TABLE_CONTINUE;
2797}
2798
2799static void
2800mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2801{
2802 if (tbl) {
2803 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2804 }
2805}
2806
2807bool rb_gc_impl_checking_shareable(void *objspace_ptr); // in gc/default/default.c
2808
2809bool
2810rb_gc_checking_shareable(void)
2811{
2812 return rb_gc_impl_checking_shareable(rb_gc_get_objspace());
2813}
2814
2815
2816static enum rb_id_table_iterator_result
2817mark_const_entry_i(VALUE value, void *objspace)
2818{
2819 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2820
2821 if (!rb_gc_impl_checking_shareable(objspace)) {
2822 gc_mark_internal(ce->value);
2823 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
2824 }
2825 return ID_TABLE_CONTINUE;
2826}
2827
2828static void
2829mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2830{
2831 if (!tbl) return;
2832 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2833}
2834
2835static enum rb_id_table_iterator_result
2836mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2837{
2838 struct rb_cvar_class_tbl_entry *entry;
2839
2840 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2841
2842 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2843 gc_mark_internal((VALUE)entry->cref);
2844
2845 return ID_TABLE_CONTINUE;
2846}
2847
2848static void
2849mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2850{
2851 if (!tbl) return;
2852 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2853}
2854
2855#if STACK_GROW_DIRECTION < 0
2856#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2857#elif STACK_GROW_DIRECTION > 0
2858#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2859#else
2860#define GET_STACK_BOUNDS(start, end, appendix) \
2861 ((STACK_END < STACK_START) ? \
2862 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2863#endif
2864
2865static void
2866gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2867{
2868 gc_mark_maybe_internal(obj);
2869
2870#ifdef RUBY_ASAN_ENABLED
2871 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2872 void *fake_frame_start;
2873 void *fake_frame_end;
2874 bool is_fake_frame = asan_get_fake_stack_extents(
2875 ec->machine.asan_fake_stack_handle, obj,
2876 ec->machine.stack_start, ec->machine.stack_end,
2877 &fake_frame_start, &fake_frame_end
2878 );
2879 if (is_fake_frame) {
2880 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2881 }
2882#endif
2883}
2884
2885static VALUE
2886gc_location_internal(void *objspace, VALUE value)
2887{
2888 if (SPECIAL_CONST_P(value)) {
2889 return value;
2890 }
2891
2892 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2893
2894 return rb_gc_impl_location(objspace, value);
2895}
2896
2897VALUE
2898rb_gc_location(VALUE value)
2899{
2900 return gc_location_internal(rb_gc_get_objspace(), value);
2901}
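/* A minimal sketch of the compaction side that pairs with the
 * rb_gc_mark_movable() example above (hypothetical struct, registered as
 * dcompact in rb_data_type_t):
 *
 *     static void
 *     foo_compact(void *ptr)
 *     {
 *         struct foo *p = ptr;
 *         p->obj = rb_gc_location(p->obj); // fetch the new address if moved
 *     }
 */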
2902
2903#if defined(__wasm__)
2904
2905
2906static VALUE *rb_stack_range_tmp[2];
2907
2908static void
2909rb_mark_locations(void *begin, void *end)
2910{
2911 rb_stack_range_tmp[0] = begin;
2912 rb_stack_range_tmp[1] = end;
2913}
2914
2915void
2916rb_gc_save_machine_context(void)
2917{
2918 // no-op
2919}
2920
2921# if defined(__EMSCRIPTEN__)
2922
2923static void
2924mark_current_machine_context(const rb_execution_context_t *ec)
2925{
2926 emscripten_scan_stack(rb_mark_locations);
2927 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2928
2929 emscripten_scan_registers(rb_mark_locations);
2930 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2931}
2932# else // use Asyncify version
2933
2934static void
2935mark_current_machine_context(rb_execution_context_t *ec)
2936{
2937 VALUE *stack_start, *stack_end;
2938 SET_STACK_END;
2939 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2940 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2941
2942 rb_wasm_scan_locals(rb_mark_locations);
2943 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2944}
2945
2946# endif
2947
2948#else // !defined(__wasm__)
2949
2950void
2951rb_gc_save_machine_context(void)
2952{
2953 rb_thread_t *thread = GET_THREAD();
2954
2955 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2956}
2957
2958
2959static void
2960mark_current_machine_context(const rb_execution_context_t *ec)
2961{
2962 rb_gc_mark_machine_context(ec);
2963}
2964#endif
2965
2966void
2967rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2968{
2969 VALUE *stack_start, *stack_end;
2970
2971 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2972 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2973
2974 void *data =
2975#ifdef RUBY_ASAN_ENABLED
2976 /* gc_mark_machine_stack_location_maybe() uses data as const */
2977 (rb_execution_context_t *)ec;
2978#else
2979 NULL;
2980#endif
2981
2982 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2983 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2984 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2985}
2986
2987static int
2988rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2989{
2990 gc_mark_and_pin_internal((VALUE)value);
2991
2992 return ST_CONTINUE;
2993}
2994
2995void
2996rb_mark_tbl(st_table *tbl)
2997{
2998 if (!tbl || tbl->num_entries == 0) return;
2999
3000 st_foreach(tbl, rb_mark_tbl_i, 0);
3001}
3002
3003static void
3004gc_mark_tbl_no_pin(st_table *tbl)
3005{
3006 if (!tbl || tbl->num_entries == 0) return;
3007
3008 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3009}
3010
3011void
3012rb_mark_tbl_no_pin(st_table *tbl)
3013{
3014 gc_mark_tbl_no_pin(tbl);
3015}
3016
3017static bool
3018gc_declarative_marking_p(const rb_data_type_t *type)
3019{
3020 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3021}
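/* A sketch of declarative marking (hypothetical struct): instead of a dmark
 * function, the dmark slot stores a RUBY_REF_END-terminated list of offsets
 * of VALUE fields, which rb_gc_mark_children() walks below via
 * TYPED_DATA_REFS_OFFSET_LIST().
 *
 *     struct foo { VALUE bar; long baz; };
 *
 *     static const size_t foo_refs[] = {
 *         offsetof(struct foo, bar),
 *         RUBY_REF_END,
 *     };
 *
 *     static const rb_data_type_t foo_type = {
 *         "foo",
 *         { (RUBY_DATA_FUNC)(uintptr_t)foo_refs, RUBY_TYPED_DEFAULT_FREE, NULL, },
 *         0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_DECL_MARKING,
 *     };
 */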
3022
3023void
3024rb_gc_mark_roots(void *objspace, const char **categoryp)
3025{
3026 rb_execution_context_t *ec = GET_EC();
3027 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3028
3029#define MARK_CHECKPOINT(category) do { \
3030 if (categoryp) *categoryp = category; \
3031} while (0)
3032
3033 MARK_CHECKPOINT("vm");
3034 rb_vm_mark(vm);
3035
3036 MARK_CHECKPOINT("end_proc");
3037 rb_mark_end_proc();
3038
3039 MARK_CHECKPOINT("global_tbl");
3040 rb_gc_mark_global_tbl();
3041
3042#if USE_YJIT
3043 void rb_yjit_root_mark(void); // in Rust
3044
3045 if (rb_yjit_enabled_p) {
3046 MARK_CHECKPOINT("YJIT");
3047 rb_yjit_root_mark();
3048 }
3049#endif
3050
3051#if USE_ZJIT
3052 void rb_zjit_root_mark(void);
3053 if (rb_zjit_enabled_p) {
3054 MARK_CHECKPOINT("ZJIT");
3055 rb_zjit_root_mark();
3056 }
3057#endif
3058
3059 MARK_CHECKPOINT("machine_context");
3060 mark_current_machine_context(ec);
3061
3062 MARK_CHECKPOINT("global_symbols");
3063 rb_sym_global_symbols_mark_and_move();
3064
3065 MARK_CHECKPOINT("finish");
3066
3067#undef MARK_CHECKPOINT
3068}
3069
3070struct gc_mark_classext_foreach_arg {
3071 rb_objspace_t *objspace;
3072 VALUE obj;
3073};
3074
3075static void
3076gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3077{
3078 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3079 rb_objspace_t *objspace = foreach_arg->objspace;
3080
3081 if (RCLASSEXT_SUPER(ext)) {
3082 gc_mark_internal(RCLASSEXT_SUPER(ext));
3083 }
3084 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3085
3086 if (!rb_gc_impl_checking_shareable(objspace)) {
3087 // unshareable
3088 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3089 }
3090
3091 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3092 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3093 }
3094 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3095 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3096 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3097 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3098}
3099
3100static void
3101gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3102{
3103 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3104 rb_objspace_t *objspace = foreach_arg->objspace;
3105
3106 if (RCLASSEXT_SUPER(ext)) {
3107 gc_mark_internal(RCLASSEXT_SUPER(ext));
3108 }
3109 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3110 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3111 }
3112 if (RCLASSEXT_INCLUDER(ext)) {
3113 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3114 }
3115 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3116 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3117}
3118
3119#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3120
3121void
3122rb_gc_mark_children(void *objspace, VALUE obj)
3123{
3124 struct gc_mark_classext_foreach_arg foreach_args;
3125
3126 if (rb_obj_exivar_p(obj)) {
3127 rb_mark_generic_ivar(obj);
3128 }
3129
3130 switch (BUILTIN_TYPE(obj)) {
3131 case T_FLOAT:
3132 case T_BIGNUM:
3133 return;
3134
3135 case T_NIL:
3136 case T_FIXNUM:
3137 rb_bug("rb_gc_mark() called for broken object");
3138 break;
3139
3140 case T_NODE:
3141 UNEXPECTED_NODE(rb_gc_mark);
3142 break;
3143
3144 case T_IMEMO:
3145 rb_imemo_mark_and_move(obj, false);
3146 return;
3147
3148 default:
3149 break;
3150 }
3151
3152 gc_mark_internal(RBASIC(obj)->klass);
3153
3154 switch (BUILTIN_TYPE(obj)) {
3155 case T_CLASS:
3156 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3157 !rb_gc_impl_checking_shareable(objspace)) {
3158 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3159 }
3160 // Continue to the shared T_CLASS/T_MODULE
3161 case T_MODULE:
3162 foreach_args.objspace = objspace;
3163 foreach_args.obj = obj;
3164 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3165 break;
3166
3167 case T_ICLASS:
3168 foreach_args.objspace = objspace;
3169 foreach_args.obj = obj;
3170 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3171 break;
3172
3173 case T_ARRAY:
3174 if (ARY_SHARED_P(obj)) {
3175 VALUE root = ARY_SHARED_ROOT(obj);
3176 gc_mark_internal(root);
3177 }
3178 else {
3179 long len = RARRAY_LEN(obj);
3180 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3181 for (long i = 0; i < len; i++) {
3182 gc_mark_internal(ptr[i]);
3183 }
3184 }
3185 break;
3186
3187 case T_HASH:
3188 mark_hash(obj);
3189 break;
3190
3191 case T_SYMBOL:
3192 gc_mark_internal(RSYMBOL(obj)->fstr);
3193 break;
3194
3195 case T_STRING:
3196 if (STR_SHARED_P(obj)) {
3197 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3198 /* Embedded shared strings cannot be moved because this string
3199 * points into the slot of the shared string. There may be code
3200 * using the RSTRING_PTR on the stack, which would pin this
3201 * string but not pin the shared string, causing it to move. */
3202 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3203 }
3204 else {
3205 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3206 }
3207 }
3208 break;
3209
3210 case T_DATA: {
3211 bool typed_data = RTYPEDDATA_P(obj);
3212 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3213
3214 if (typed_data) {
3215 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3216 }
3217
3218 if (ptr) {
3219 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3220 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3221
3222 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3223 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3224 }
3225 }
3226 else {
3227 RUBY_DATA_FUNC mark_func = typed_data ?
3228 RTYPEDDATA_TYPE(obj)->function.dmark :
3229 RDATA(obj)->dmark;
3230 if (mark_func) (*mark_func)(ptr);
3231 }
3232 }
3233
3234 break;
3235 }
3236
3237 case T_OBJECT: {
3238 if (rb_shape_obj_too_complex_p(obj)) {
3239 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3240 }
3241 else {
3242 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3243
3244 uint32_t len = ROBJECT_FIELDS_COUNT(obj);
3245 for (uint32_t i = 0; i < len; i++) {
3246 gc_mark_internal(ptr[i]);
3247 }
3248 }
3249
3250 attr_index_t fields_count = ROBJECT_FIELDS_COUNT(obj);
3251 if (fields_count) {
3252 VALUE klass = RBASIC_CLASS(obj);
3253
3254 // Increment max_iv_count if applicable, used to determine size pool allocation
3255 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3256 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3257 }
3258 }
3259
3260 break;
3261 }
3262
3263 case T_FILE:
3264 if (RFILE(obj)->fptr) {
3265 gc_mark_internal(RFILE(obj)->fptr->self);
3266 gc_mark_internal(RFILE(obj)->fptr->pathv);
3267 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3268 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3269 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3270 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3271 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3272 gc_mark_internal(RFILE(obj)->fptr->timeout);
3273 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3274 }
3275 break;
3276
3277 case T_REGEXP:
3278 gc_mark_internal(RREGEXP(obj)->src);
3279 break;
3280
3281 case T_MATCH:
3282 gc_mark_internal(RMATCH(obj)->regexp);
3283 if (RMATCH(obj)->str) {
3284 gc_mark_internal(RMATCH(obj)->str);
3285 }
3286 break;
3287
3288 case T_RATIONAL:
3289 gc_mark_internal(RRATIONAL(obj)->num);
3290 gc_mark_internal(RRATIONAL(obj)->den);
3291 break;
3292
3293 case T_COMPLEX:
3294 gc_mark_internal(RCOMPLEX(obj)->real);
3295 gc_mark_internal(RCOMPLEX(obj)->imag);
3296 break;
3297
3298 case T_STRUCT: {
3299 const long len = RSTRUCT_LEN(obj);
3300 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3301
3302 for (long i = 0; i < len; i++) {
3303 gc_mark_internal(ptr[i]);
3304 }
3305
3306 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3307 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3308 }
3309
3310 break;
3311 }
3312
3313 default:
3314 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3315 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3316 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3317 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3318 BUILTIN_TYPE(obj), (void *)obj,
3319 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3320 }
3321}
3322
3323size_t
3324rb_gc_obj_optimal_size(VALUE obj)
3325{
3326 switch (BUILTIN_TYPE(obj)) {
3327 case T_ARRAY:
3328 return rb_ary_size_as_embedded(obj);
3329
3330 case T_OBJECT:
3331 if (rb_shape_obj_too_complex_p(obj)) {
3332 return sizeof(struct RObject);
3333 }
3334 else {
3335 return rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3336 }
3337
3338 case T_STRING:
3339 return rb_str_size_as_embedded(obj);
3340
3341 case T_HASH:
3342 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3343
3344 default:
3345 return 0;
3346 }
3347}
3348
3349void
3350rb_gc_writebarrier(VALUE a, VALUE b)
3351{
3352 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3353}
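/* C extensions normally do not call this directly: the RB_OBJ_WRITE() macro
 * performs the store and the barrier together, e.g. (hypothetical struct):
 *
 *     RB_OBJ_WRITE(parent, &p->child, child); // store + rb_gc_writebarrier
 */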
3354
3355void
3356rb_gc_writebarrier_unprotect(VALUE obj)
3357{
3358 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3359}
3360
3361/*
3362 * remember `obj' if needed.
3363 */
3364void
3365rb_gc_writebarrier_remember(VALUE obj)
3366{
3367 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3368}
3369
3370void
3371rb_gc_copy_attributes(VALUE dest, VALUE obj)
3372{
3373 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3374}
3375
3376int
3377rb_gc_modular_gc_loaded_p(void)
3378{
3379#if USE_MODULAR_GC
3380 return rb_gc_functions.modular_gc_loaded_p;
3381#else
3382 return false;
3383#endif
3384}
3385
3386const char *
3387rb_gc_active_gc_name(void)
3388{
3389 const char *gc_name = rb_gc_impl_active_gc_name();
3390
3391 const size_t len = strlen(gc_name);
3392 if (len > RB_GC_MAX_NAME_LEN) {
3393 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3394 RB_GC_MAX_NAME_LEN, len, gc_name);
3395 }
3396
3397 return gc_name;
3398}
3399
3400struct rb_gc_object_metadata_entry *
3401rb_gc_object_metadata(VALUE obj)
3402{
3403 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3404}
3405
3406/* GC */
3407
3408void *
3409rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3410{
3411 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3412}
3413
3414void
3415rb_gc_ractor_cache_free(void *cache)
3416{
3417 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3418}
3419
3420void
3421rb_gc_register_mark_object(VALUE obj)
3422{
3423 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3424 return;
3425
3426 rb_vm_register_global_object(obj);
3427}
3428
3429void
3430rb_gc_register_address(VALUE *addr)
3431{
3432 rb_vm_t *vm = GET_VM();
3433
3434 VALUE obj = *addr;
3435
3436 struct global_object_list *tmp = ALLOC(struct global_object_list);
3437 tmp->next = vm->global_object_list;
3438 tmp->varptr = addr;
3439 vm->global_object_list = tmp;
3440
3441 /*
3442 * Because some C extensions have assignment-then-register bugs,
3443 * we defensively guard `obj` here so that it does not get swept.
3444 */
3445 RB_GC_GUARD(obj);
3446 if (0 && !SPECIAL_CONST_P(obj)) {
3447 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3448 rb_obj_class(obj));
3449 rb_print_backtrace(stderr);
3450 }
3451}
3452
3453void
3454rb_gc_unregister_address(VALUE *addr)
3455{
3456 rb_vm_t *vm = GET_VM();
3457 struct global_object_list *tmp = vm->global_object_list;
3458
3459 if (tmp->varptr == addr) {
3460 vm->global_object_list = tmp->next;
3461 xfree(tmp);
3462 return;
3463 }
3464 while (tmp->next) {
3465 if (tmp->next->varptr == addr) {
3466 struct global_object_list *t = tmp->next;
3467
3468 tmp->next = tmp->next->next;
3469 xfree(t);
3470 break;
3471 }
3472 tmp = tmp->next;
3473 }
3474}
3475
3476void
3477rb_global_variable(VALUE *var)
3478{
3479 rb_gc_register_address(var);
3480}
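/* Typical C extension usage (illustrative):
 *
 *     static VALUE my_cache;
 *
 *     void
 *     Init_myext(void)
 *     {
 *         my_cache = rb_ary_new();
 *         rb_gc_register_address(&my_cache); // or rb_global_variable()
 *     }
 *
 * The RB_GC_GUARD in rb_gc_register_address() exists because the object
 * assigned to *addr could otherwise be collected between the assignment
 * and the registration.
 */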
3481
3482static VALUE
3483gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3484{
3485 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3486
3487 return Qnil;
3488}
3489
3490/*
3491 * rb_objspace_each_objects() is a special C API for walking through the
3492 * Ruby object space. It is difficult to use safely; frankly, you should
3493 * not use it unless you have read the source code of this function and
3494 * understand what it does.
3495 *
3496 * 'callback' will be called several times (once per heap page, in the
3497 * current implementation) with:
3498 * vstart: a pointer to the first living object of the heap_page.
3499 * vend: a pointer to one past the end of the valid heap_page area.
3500 * stride: the distance to the next VALUE.
3501 *
3502 * If callback() returns non-zero, the iteration will be stopped.
3503 *
3504 * Here is sample callback code that iterates over live objects:
3505 *
3506 * static int
3507 * sample_callback(void *vstart, void *vend, int stride, void *data)
3508 * {
3509 * VALUE v = (VALUE)vstart;
3510 * for (; v != (VALUE)vend; v += stride) {
3511 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3512 * // do something with live object 'v'
3513 * }
3514 * }
3515 * return 0; // continue to iteration
3516 * }
3517 *
3518 * Note: 'vstart' is not the top of the heap_page. It points at the first
3519 * living object, so that at least one object is included; this avoids GC
3520 * issues. It also means that you cannot walk through every Ruby object
3521 * page, including pages of freed objects.
3522 *
3523 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
3524 * However, 'stride' may be given a different value in the future for
3525 * various reasons, so you must use 'stride' rather than a constant
3526 * value in the iteration.
3527 */
3528void
3529rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3530{
3531 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3532}
3533
3534static void
3535gc_ref_update_array(void *objspace, VALUE v)
3536{
3537 if (ARY_SHARED_P(v)) {
3538 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3539
3540 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3541
3542 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3543 // If the root is embedded and its location has changed
3544 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3545 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3546 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3547 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3548 }
3549 }
3550 else {
3551 long len = RARRAY_LEN(v);
3552
3553 if (len > 0) {
3554 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3555 for (long i = 0; i < len; i++) {
3556 UPDATE_IF_MOVED(objspace, ptr[i]);
3557 }
3558 }
3559
3560 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3561 if (rb_ary_embeddable_p(v)) {
3562 rb_ary_make_embedded(v);
3563 }
3564 }
3565 }
3566}
3567
3568static void
3569gc_ref_update_object(void *objspace, VALUE v)
3570{
3571 VALUE *ptr = ROBJECT_FIELDS(v);
3572
3573 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3574 if (rb_shape_obj_too_complex_p(v)) {
3575 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3576 return;
3577 }
3578
3579 size_t slot_size = rb_gc_obj_slot_size(v);
3580 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3581 if (slot_size >= embed_size) {
3582 // Object can be re-embedded
3583 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3584 FL_UNSET_RAW(v, ROBJECT_HEAP);
3585 xfree(ptr);
3586 ptr = ROBJECT(v)->as.ary;
3587 }
3588 }
3589
3590 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3591 UPDATE_IF_MOVED(objspace, ptr[i]);
3592 }
3593}
3594
3595void
3596rb_gc_ref_update_table_values_only(st_table *tbl)
3597{
3598 gc_ref_update_table_values_only(tbl);
3599}
3600
3601/* Update MOVED references in a VALUE=>VALUE st_table */
3602void
3603rb_gc_update_tbl_refs(st_table *ptr)
3604{
3605 gc_update_table_refs(ptr);
3606}
3607
3608static void
3609gc_ref_update_hash(void *objspace, VALUE v)
3610{
3611 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3612}
3613
3614static void
3615gc_update_values(void *objspace, long n, VALUE *values)
3616{
3617 for (long i = 0; i < n; i++) {
3618 UPDATE_IF_MOVED(objspace, values[i]);
3619 }
3620}
3621
3622void
3623rb_gc_update_values(long n, VALUE *values)
3624{
3625 gc_update_values(rb_gc_get_objspace(), n, values);
3626}
3627
3628static enum rb_id_table_iterator_result
3629check_id_table_move(VALUE value, void *data)
3630{
3631 void *objspace = (void *)data;
3632
3633 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3634 return ID_TABLE_REPLACE;
3635 }
3636
3637 return ID_TABLE_CONTINUE;
3638}
3639
3640void
3641rb_gc_prepare_heap_process_object(VALUE obj)
3642{
3643 switch (BUILTIN_TYPE(obj)) {
3644 case T_STRING:
3645 // Precompute the string coderange. This both saves time for when it is
3646 // eventually needed, and avoids mutating heap pages after a potential fork.
3647 rb_enc_str_coderange(obj);
3648 break;
3649 default:
3650 break;
3651 }
3652}
3653
3654void
3655rb_gc_prepare_heap(void)
3656{
3657 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3658}
3659
3660size_t
3661rb_gc_heap_id_for_size(size_t size)
3662{
3663 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3664}
3665
3666bool
3667rb_gc_size_allocatable_p(size_t size)
3668{
3669 return rb_gc_impl_size_allocatable_p(size);
3670}
3671
3672static enum rb_id_table_iterator_result
3673update_id_table(VALUE *value, void *data, int existing)
3674{
3675 void *objspace = (void *)data;
3676
3677 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3678 *value = gc_location_internal(objspace, (VALUE)*value);
3679 }
3680
3681 return ID_TABLE_CONTINUE;
3682}
3683
3684static void
3685update_m_tbl(void *objspace, struct rb_id_table *tbl)
3686{
3687 if (tbl) {
3688 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3689 }
3690}
3691
3692static enum rb_id_table_iterator_result
3693update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3694{
3695 struct rb_cvar_class_tbl_entry *entry;
3696
3697 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3698
3699 if (entry->cref) {
3700 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3701 }
3702
3703 entry->class_value = gc_location_internal(objspace, entry->class_value);
3704
3705 return ID_TABLE_CONTINUE;
3706}
3707
3708static void
3709update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3710{
3711 if (!tbl) return;
3712 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3713}
3714
3715static enum rb_id_table_iterator_result
3716update_const_tbl_i(VALUE value, void *objspace)
3717{
3718 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3719
3720 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3721 ce->value = gc_location_internal(objspace, ce->value);
3722 }
3723
3724 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3725 ce->file = gc_location_internal(objspace, ce->file);
3726 }
3727
3728 return ID_TABLE_CONTINUE;
3729}
3730
3731static void
3732update_const_tbl(void *objspace, struct rb_id_table *tbl)
3733{
3734 if (!tbl) return;
3735 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3736}
3737
3738static void
3739update_subclasses(void *objspace, rb_classext_t *ext)
3740{
3741 rb_subclass_entry_t *entry;
3742 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3743 if (!anchor) return;
3744 entry = anchor->head;
3745 while (entry) {
3746 if (entry->klass)
3747 UPDATE_IF_MOVED(objspace, entry->klass);
3748 entry = entry->next;
3749 }
3750}
3751
3752static void
3753update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3754{
3755 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
3756 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
3757 for (size_t i = 0; i < array_size; i++) {
3758 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3759 }
3760 }
3761}
3762
3763static void
3764update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3765{
3766 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3767 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3768 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3769 if (is_iclass) {
3770 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3771 }
3772}
3773
3774static void
3775update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3776{
3777 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3778 rb_objspace_t *objspace = args->objspace;
3779
3780 if (RCLASSEXT_SUPER(ext)) {
3781 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3782 }
3783
3784 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3785
3786 UPDATE_IF_MOVED(objspace, ext->fields_obj);
3787 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3788 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3789 }
3790 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3791 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3792 update_superclasses(objspace, ext);
3793 update_subclasses(objspace, ext);
3794
3795 update_classext_values(objspace, ext, false);
3796}
3797
3798static void
3799update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3800{
3801 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3802 rb_objspace_t *objspace = args->objspace;
3803
3804 if (RCLASSEXT_SUPER(ext)) {
3805 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3806 }
3807 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3808 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3809 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3810 update_subclasses(objspace, ext);
3811
3812 update_classext_values(objspace, ext, true);
3813}
3814
3815struct global_vm_table_foreach_data {
3816 vm_table_foreach_callback_func callback;
3817 vm_table_update_callback_func update_callback;
3818 void *data;
3819 bool weak_only;
3820};
3821
3822static int
3823vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3824{
3825 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3826
3827 int ret = iter_data->callback((VALUE)key, iter_data->data);
3828
3829 if (!iter_data->weak_only) {
3830 if (ret != ST_CONTINUE) return ret;
3831
3832 ret = iter_data->callback((VALUE)value, iter_data->data);
3833 }
3834
3835 return ret;
3836}
3837
3838static int
3839vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3840{
3841 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3842
3843 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3844
3845 if (!iter_data->weak_only) {
3846 if (ret != ST_CONTINUE) return ret;
3847
3848 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3849 }
3850
3851 return ret;
3852}
3853
3854static int
3855vm_weak_table_cc_refinement_foreach(st_data_t key, st_data_t data, int error)
3856{
3857 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3858
3859 return iter_data->callback((VALUE)key, iter_data->data);
3860}
3861
3862static int
3863vm_weak_table_cc_refinement_foreach_update_update(st_data_t *key, st_data_t data, int existing)
3864{
3865 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3866
3867 return iter_data->update_callback((VALUE *)key, iter_data->data);
3868}
3869
3870
3871static int
3872vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
3873{
3874 VALUE sym = *sym_ptr;
3875 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3876
3877 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
3878
3879 int ret = iter_data->callback(sym, iter_data->data);
3880
3881 if (ret == ST_REPLACE) {
3882 ret = iter_data->update_callback(sym_ptr, iter_data->data);
3883 }
3884
3885 return ret;
3886}
3887
3888struct st_table *rb_generic_fields_tbl_get(void);
3889
3890static int
3891vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3892{
3893 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3894
3895 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3896 int ret = iter_data->callback((VALUE)key, iter_data->data);
3897 if (ret != ST_CONTINUE) return ret;
3898 }
3899
3900 return iter_data->callback((VALUE)value, iter_data->data);
3901}
3902
3903static int
3904vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3905{
3906 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3907
3908 iter_data->update_callback((VALUE *)value, iter_data->data);
3909
3910 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
3911 iter_data->update_callback((VALUE *)key, iter_data->data);
3912 }
3913
3914 return ST_CONTINUE;
3915}
3916
3917static int
3918vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
3919{
3920 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3921
3922 int ret = iter_data->callback((VALUE)key, iter_data->data);
3923
3924 VALUE new_value = (VALUE)value;
3925 VALUE new_key = (VALUE)key;
3926
3927 switch (ret) {
3928 case ST_CONTINUE:
3929 break;
3930
3931 case ST_DELETE:
3932 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
3933 return ST_DELETE;
3934
3935 case ST_REPLACE: {
3936 ret = iter_data->update_callback(&new_key, iter_data->data);
3937 if (key != new_key) {
3938 ret = ST_DELETE;
3939 }
3940 break;
3941 }
3942
3943 default:
3944 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
3945 }
3946
3947 if (!iter_data->weak_only) {
3948 int ivar_ret = iter_data->callback(new_value, iter_data->data);
3949 switch (ivar_ret) {
3950 case ST_CONTINUE:
3951 break;
3952
3953 case ST_REPLACE:
3954 iter_data->update_callback(&new_value, iter_data->data);
3955 break;
3956
3957 default:
3958 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
3959 }
3960 }
3961
3962 if (key != new_key || value != new_value) {
3963 DURING_GC_COULD_MALLOC_REGION_START();
3964 {
3965 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
3966 }
3967 DURING_GC_COULD_MALLOC_REGION_END();
3968 }
3969
3970 return ret;
3971}
3972
3973static int
3974vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
3975{
3977 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3978 int retval = iter_data->callback(*str, iter_data->data);
3979
3980 if (retval == ST_REPLACE) {
3981 retval = iter_data->update_callback(str, iter_data->data);
3982 }
3983
3984 if (retval == ST_DELETE) {
3985 FL_UNSET(*str, RSTRING_FSTR);
3986 }
3987
3988 return retval;
3989}
3990
3991void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
3992void
3993rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3994 vm_table_update_callback_func update_callback,
3995 void *data,
3996 bool weak_only,
3997 enum rb_gc_vm_weak_tables table)
3998{
3999 rb_vm_t *vm = GET_VM();
4000
4001 struct global_vm_table_foreach_data foreach_data = {
4002 .callback = callback,
4003 .update_callback = update_callback,
4004 .data = data,
4005 .weak_only = weak_only,
4006 };
4007
4008 switch (table) {
4009 case RB_GC_VM_CI_TABLE: {
4010 if (vm->ci_table) {
4011 st_foreach_with_replace(
4012 vm->ci_table,
4013 vm_weak_table_foreach_weak_key,
4014 vm_weak_table_foreach_update_weak_key,
4015 (st_data_t)&foreach_data
4016 );
4017 }
4018 break;
4019 }
4020 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4021 if (vm->overloaded_cme_table) {
4022 st_foreach_with_replace(
4023 vm->overloaded_cme_table,
4024 vm_weak_table_foreach_weak_key,
4025 vm_weak_table_foreach_update_weak_key,
4026 (st_data_t)&foreach_data
4027 );
4028 }
4029 break;
4030 }
4031 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4032 rb_sym_global_symbol_table_foreach_weak_reference(
4033 vm_weak_table_sym_set_foreach,
4034 &foreach_data
4035 );
4036 break;
4037 }
4038 case RB_GC_VM_ID2REF_TABLE: {
4039 if (id2ref_tbl) {
4040 st_foreach_with_replace(
4041 id2ref_tbl,
4042 vm_weak_table_id2ref_foreach,
4043 vm_weak_table_id2ref_foreach_update,
4044 (st_data_t)&foreach_data
4045 );
4046 }
4047 break;
4048 }
4049 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4050 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4051 if (generic_fields_tbl) {
4052 st_foreach(
4053 generic_fields_tbl,
4054 vm_weak_table_gen_fields_foreach,
4055 (st_data_t)&foreach_data
4056 );
4057 }
4058 break;
4059 }
4060 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4061 rb_fstring_foreach_with_replace(
4062 vm_weak_table_frozen_strings_foreach,
4063 &foreach_data
4064 );
4065 break;
4066 }
4067 case RB_GC_VM_CC_REFINEMENT_TABLE: {
4068 if (vm->cc_refinement_table) {
4069 set_foreach_with_replace(
4070 vm->cc_refinement_table,
4071 vm_weak_table_cc_refinement_foreach,
4072 vm_weak_table_cc_refinement_foreach_update_update,
4073 (st_data_t)&foreach_data
4074 );
4075 }
4076 break;
4077 }
4078 case RB_GC_VM_WEAK_TABLE_COUNT:
4079 rb_bug("Unreachable");
4080 default:
4081 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4082 }
4083}
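
/* Editor's sketch of the callback contract assumed above (the helper names
 * are hypothetical): `callback` classifies each entry and, when it answers
 * ST_REPLACE, `update_callback` rewrites the reference to a moved object.
 *
 *     static int
 *     example_weak_cb(VALUE obj, void *data)
 *     {
 *         if (rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj)) {
 *             return ST_DELETE;  // drop dead entries
 *         }
 *         return ST_REPLACE;     // ask to have moved pointers rewritten
 *     }
 *
 *     static int
 *     example_weak_update_cb(VALUE *obj, void *data)
 *     {
 *         *obj = rb_gc_location(*obj); // chase the relocated object
 *         return ST_CONTINUE;
 *     }
 */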
4084
4085void
4086rb_gc_update_vm_references(void *objspace)
4087{
4088 rb_execution_context_t *ec = GET_EC();
4089 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4090
4091 rb_vm_update_references(vm);
4092 rb_gc_update_global_tbl();
4093 rb_sym_global_symbols_mark_and_move();
4094
4095#if USE_YJIT
4096 void rb_yjit_root_update_references(void); // in Rust
4097
4098 if (rb_yjit_enabled_p) {
4099 rb_yjit_root_update_references();
4100 }
4101#endif
4102
4103#if USE_ZJIT
4104 void rb_zjit_root_update_references(void); // in Rust
4105
4106 if (rb_zjit_enabled_p) {
4107 rb_zjit_root_update_references();
4108 }
4109#endif
4110}
4111
4112void
4113rb_gc_update_object_references(void *objspace, VALUE obj)
4114{
4115 struct classext_foreach_args args;
4116
4117 switch (BUILTIN_TYPE(obj)) {
4118 case T_CLASS:
4119 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4120 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4121 }
4122 // Fall through to the shared T_CLASS/T_MODULE handling below.
4123 case T_MODULE:
4124 args.klass = obj;
4125 args.objspace = objspace;
4126 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4127 break;
4128
4129 case T_ICLASS:
4130 args.objspace = objspace;
4131 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4132 break;
4133
4134 case T_IMEMO:
4135 rb_imemo_mark_and_move(obj, true);
4136 return;
4137
4138 case T_NIL:
4139 case T_FIXNUM:
4140 case T_NODE:
4141 case T_MOVED:
4142 case T_NONE:
4143 /* These can't move */
4144 return;
4145
4146 case T_ARRAY:
4147 gc_ref_update_array(objspace, obj);
4148 break;
4149
4150 case T_HASH:
4151 gc_ref_update_hash(objspace, obj);
4152 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4153 break;
4154
4155 case T_STRING:
4156 {
4157 if (STR_SHARED_P(obj)) {
4158 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4159 }
4160
4161 /* If, after the move, the string is not embedded but can fit in the
4162 * slot it's been placed in, then re-embed it. */
4163 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4164 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4165 rb_str_make_embedded(obj);
4166 }
4167 }
4168
4169 break;
4170 }
4171 case T_DATA:
4172 /* Call the compaction callback, if it exists */
4173 {
4174 bool typed_data = RTYPEDDATA_P(obj);
4175 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4176
4177 if (typed_data) {
4178 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4179 }
4180
4181 if (ptr) {
4182 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4183 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4184
4185 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4186 VALUE *ref = (VALUE *)((char *)ptr + offset);
4187 *ref = gc_location_internal(objspace, *ref);
4188 }
4189 }
4190 else if (typed_data) {
4191 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4192 if (compact_func) (*compact_func)(ptr);
4193 }
4194 }
4195 }
4196 break;
4197
4198 case T_OBJECT:
4199 gc_ref_update_object(objspace, obj);
4200 break;
4201
4202 case T_FILE:
4203 if (RFILE(obj)->fptr) {
4204 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4205 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4206 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4207 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4208 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4209 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4210 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4211 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4212 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4213 }
4214 break;
4215 case T_REGEXP:
4216 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4217 break;
4218
4219 case T_SYMBOL:
4220 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4221 break;
4222
4223 case T_FLOAT:
4224 case T_BIGNUM:
4225 break;
4226
4227 case T_MATCH:
4228 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4229
4230 if (RMATCH(obj)->str) {
4231 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4232 }
4233 break;
4234
4235 case T_RATIONAL:
4236 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4237 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4238 break;
4239
4240 case T_COMPLEX:
4241 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4242 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4243
4244 break;
4245
4246 case T_STRUCT:
4247 {
4248 long i, len = RSTRUCT_LEN(obj);
4249 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4250
4251 for (i = 0; i < len; i++) {
4252 UPDATE_IF_MOVED(objspace, ptr[i]);
4253 }
4254
4255 if (RSTRUCT_EMBED_LEN(obj)) {
4256 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4257 UPDATE_IF_MOVED(objspace, ptr[len]);
4258 }
4259 }
4260 else {
4261 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4262 }
4263 }
4264 break;
4265 default:
4266 rb_bug("unreachable");
4267 break;
4268 }
4269
4270 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4271}
4272
4273VALUE
4274rb_gc_start(void)
4275{
4276 rb_gc();
4277 return Qnil;
4278}
4279
4280void
4281rb_gc(void)
4282{
4283 unless_objspace(objspace) { return; }
4284
4285 rb_gc_impl_start(objspace, true, true, true, false);
4286}
4287
4288int
4289rb_during_gc(void)
4290{
4291 unless_objspace(objspace) { return FALSE; }
4292
4293 return rb_gc_impl_during_gc_p(objspace);
4294}
4295
4296size_t
4297rb_gc_count(void)
4298{
4299 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4300}
4301
4302static VALUE
4303gc_count(rb_execution_context_t *ec, VALUE self)
4304{
4305 return SIZET2NUM(rb_gc_count());
4306}
4307
4308VALUE
4309rb_gc_latest_gc_info(VALUE key)
4310{
4311 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4312 rb_raise(rb_eTypeError, "non-hash or symbol given");
4313 }
4314
4315 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4316
4317 if (val == Qundef) {
4318 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4319 }
4320
4321 return val;
4322}
4323
4324static VALUE
4325gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4326{
4327 if (NIL_P(arg)) {
4328 arg = rb_hash_new();
4329 }
4330 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4331 rb_raise(rb_eTypeError, "non-hash or symbol given");
4332 }
4333
4334 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4335
4336 if (ret == Qundef) {
4337 GC_ASSERT(SYMBOL_P(arg));
4338
4339 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4340 }
4341
4342 return ret;
4343}
4344
4345size_t
4346rb_gc_stat(VALUE arg)
4347{
4348 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4349 rb_raise(rb_eTypeError, "non-hash or symbol given");
4350 }
4351
4352 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4353
4354 if (ret == Qundef) {
4355 GC_ASSERT(SYMBOL_P(arg));
4356
4357 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4358 }
4359
4360 if (SYMBOL_P(arg)) {
4361 return NUM2SIZET(ret);
4362 }
4363 else {
4364 return 0;
4365 }
4366}
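
/* Example (editor's sketch; not exercised in this file): at the C level a
 * Symbol argument returns one counter directly, while a Hash argument is
 * filled in and 0 is returned.
 *
 *     size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));
 *     VALUE stats = rb_hash_new();
 *     rb_gc_stat(stats); // populates every known key, returns 0
 */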
4367
4368static VALUE
4369gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4370{
4371 if (NIL_P(arg)) {
4372 arg = rb_hash_new();
4373 }
4374
4375 if (NIL_P(heap_name)) {
4376 if (!RB_TYPE_P(arg, T_HASH)) {
4377 rb_raise(rb_eTypeError, "non-hash given");
4378 }
4379 }
4380 else if (FIXNUM_P(heap_name)) {
4381 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4382 rb_raise(rb_eTypeError, "non-hash or symbol given");
4383 }
4384 }
4385 else {
4386 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4387 }
4388
4389 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4390
4391 if (ret == Qundef) {
4392 GC_ASSERT(SYMBOL_P(arg));
4393
4394 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4395 }
4396
4397 return ret;
4398}
4399
4400static VALUE
4401gc_config_get(rb_execution_context_t *ec, VALUE self)
4402{
4403 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4404 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4405
4406 return cfg_hash;
4407}
4408
4409static VALUE
4410gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4411{
4412 void *objspace = rb_gc_get_objspace();
4413
4414 rb_gc_impl_config_set(objspace, hash);
4415
4416 return Qnil;
4417}
4418
4419static VALUE
4420gc_stress_get(rb_execution_context_t *ec, VALUE self)
4421{
4422 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4423}
4424
4425static VALUE
4426gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4427{
4428 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4429
4430 return flag;
4431}
4432
4433void
4434rb_gc_initial_stress_set(VALUE flag)
4435{
4436 initial_stress = flag;
4437}
4438
4439size_t *
4440rb_gc_heap_sizes(void)
4441{
4442 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4443}
4444
4445VALUE
4446rb_gc_enable(void)
4447{
4448 return rb_objspace_gc_enable(rb_gc_get_objspace());
4449}
4450
4451VALUE
4452rb_objspace_gc_enable(void *objspace)
4453{
4454 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4455 rb_gc_impl_gc_enable(objspace);
4456 return RBOOL(disabled);
4457}
4458
4459static VALUE
4460gc_enable(rb_execution_context_t *ec, VALUE _)
4461{
4462 return rb_gc_enable();
4463}
4464
4465static VALUE
4466gc_disable_no_rest(void *objspace)
4467{
4468 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4469 rb_gc_impl_gc_disable(objspace, false);
4470 return RBOOL(disabled);
4471}
4472
4473VALUE
4474rb_gc_disable_no_rest(void)
4475{
4476 return gc_disable_no_rest(rb_gc_get_objspace());
4477}
4478
4479VALUE
4480rb_gc_disable(void)
4481{
4482 return rb_objspace_gc_disable(rb_gc_get_objspace());
4483}
4484
4485VALUE
4486rb_objspace_gc_disable(void *objspace)
4487{
4488 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4489 rb_gc_impl_gc_disable(objspace, true);
4490 return RBOOL(disabled);
4491}
4492
4493static VALUE
4494gc_disable(rb_execution_context_t *ec, VALUE _)
4495{
4496 return rb_gc_disable();
4497}
4498
4499// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4500void
4501ruby_gc_set_params(void)
4502{
4503 rb_gc_impl_set_params(rb_gc_get_objspace());
4504}
4505
4506void
4507rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4508{
4509 RB_VM_LOCKING() {
4510 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4511
4512 if (!RB_SPECIAL_CONST_P(obj)) {
4513 rb_vm_t *vm = GET_VM();
4514 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4515 struct gc_mark_func_data_struct mfd = {
4516 .mark_func = func,
4517 .data = data,
4518 };
4519
4520 vm->gc.mark_func_data = &mfd;
4521 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4522 vm->gc.mark_func_data = prev_mfd;
4523 }
4524 }
4525}
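
/* A hedged usage sketch: counting an object's outgoing references with a
 * plain callback. `count_edge` and `edges` are the editor's names; only the
 * iteration API above is from this file.
 *
 *     static void
 *     count_edge(VALUE child, void *data)
 *     {
 *         (*(size_t *)data)++; // one reachable child of obj
 *     }
 *
 *     size_t edges = 0;
 *     rb_objspace_reachable_objects_from(obj, count_edge, &edges);
 */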
4526
4527struct root_objects_data {
4528 const char *category;
4529 void (*func)(const char *category, VALUE, void *);
4530 void *data;
4531};
4532
4533static void
4534root_objects_from(VALUE obj, void *ptr)
4535{
4536 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4537 (*data->func)(data->category, obj, data->data);
4538}
4539
4540void
4541rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4542{
4543 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4544
4545 rb_vm_t *vm = GET_VM();
4546
4547 struct root_objects_data data = {
4548 .func = func,
4549 .data = passing_data,
4550 };
4551
4552 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4553 struct gc_mark_func_data_struct mfd = {
4554 .mark_func = root_objects_from,
4555 .data = &data,
4556 };
4557
4558 vm->gc.mark_func_data = &mfd;
4559 rb_gc_save_machine_context();
4560 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4561 vm->gc.mark_func_data = prev_mfd;
4562}
4563
4564/*
4565 ------------------------------ DEBUG ------------------------------
4566*/
4567
4568static const char *
4569type_name(int type, VALUE obj)
4570{
4571 switch (type) {
4572#define TYPE_NAME(t) case (t): return #t;
4573 TYPE_NAME(T_NONE);
4574 TYPE_NAME(T_OBJECT);
4575 TYPE_NAME(T_CLASS);
4576 TYPE_NAME(T_MODULE);
4577 TYPE_NAME(T_FLOAT);
4578 TYPE_NAME(T_STRING);
4579 TYPE_NAME(T_REGEXP);
4580 TYPE_NAME(T_ARRAY);
4581 TYPE_NAME(T_HASH);
4582 TYPE_NAME(T_STRUCT);
4583 TYPE_NAME(T_BIGNUM);
4584 TYPE_NAME(T_FILE);
4585 TYPE_NAME(T_MATCH);
4586 TYPE_NAME(T_COMPLEX);
4587 TYPE_NAME(T_RATIONAL);
4588 TYPE_NAME(T_NIL);
4589 TYPE_NAME(T_TRUE);
4590 TYPE_NAME(T_FALSE);
4591 TYPE_NAME(T_SYMBOL);
4592 TYPE_NAME(T_FIXNUM);
4593 TYPE_NAME(T_UNDEF);
4594 TYPE_NAME(T_IMEMO);
4595 TYPE_NAME(T_ICLASS);
4596 TYPE_NAME(T_MOVED);
4597 TYPE_NAME(T_ZOMBIE);
4598 case T_DATA:
4599 if (obj && rb_objspace_data_type_name(obj)) {
4600 return rb_objspace_data_type_name(obj);
4601 }
4602 return "T_DATA";
4603#undef TYPE_NAME
4604 }
4605 return "unknown";
4606}
4607
4608static const char *
4609obj_type_name(VALUE obj)
4610{
4611 return type_name(TYPE(obj), obj);
4612}
4613
4614const char *
4615rb_method_type_name(rb_method_type_t type)
4616{
4617 switch (type) {
4618 case VM_METHOD_TYPE_ISEQ: return "iseq";
4619 case VM_METHOD_TYPE_ATTRSET: return "attrset";
4620 case VM_METHOD_TYPE_IVAR: return "ivar";
4621 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4622 case VM_METHOD_TYPE_ALIAS: return "alias";
4623 case VM_METHOD_TYPE_REFINED: return "refined";
4624 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4625 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4626 case VM_METHOD_TYPE_MISSING: return "missing";
4627 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4628 case VM_METHOD_TYPE_UNDEF: return "undef";
4629 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4630 }
4631 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4632}
4633
4634static void
4635rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4636{
4637 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4638 VALUE path = rb_iseq_path(iseq);
4639 int n = ISEQ_BODY(iseq)->location.first_lineno;
4640 snprintf(buff, buff_size, " %s@%s:%d",
4641 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4642 RSTRING_PTR(path), n);
4643 }
4644}
4645
4646static int
4647str_len_no_raise(VALUE str)
4648{
4649 long len = RSTRING_LEN(str);
4650 if (len < 0) return 0;
4651 if (len > INT_MAX) return INT_MAX;
4652 return (int)len;
4653}
4654
4655#define BUFF_ARGS buff + pos, buff_size - pos
4656#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4657#define APPEND_S(s) do { \
4658 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4659 goto end; \
4660 } \
4661 else { \
4662 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4663 } \
4664 } while (0)
4665#define C(c, s) ((c) != 0 ? (s) : " ")
4666
4667static size_t
4668rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4669{
4670 size_t pos = 0;
4671
4672 if (SPECIAL_CONST_P(obj)) {
4673 APPEND_F("%s", obj_type_name(obj));
4674
4675 if (FIXNUM_P(obj)) {
4676 APPEND_F(" %ld", FIX2LONG(obj));
4677 }
4678 else if (SYMBOL_P(obj)) {
4679 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4680 }
4681 }
4682 else {
4683 // const int age = RVALUE_AGE_GET(obj);
4684
4685 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4686 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4687 // TODO: fixme
4688 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4689 // (void *)obj, age,
4690 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4691 // C(RVALUE_MARK_BITMAP(obj), "M"),
4692 // C(RVALUE_PIN_BITMAP(obj), "P"),
4693 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4694 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4695 // C(rb_objspace_garbage_object_p(obj), "G"),
4696 // obj_type_name(obj));
4697 }
4698 else {
4699 /* fake */
4700 // APPEND_F("%p [%dXXXX] %s",
4701 // (void *)obj, age,
4702 // obj_type_name(obj));
4703 }
4704
4705 if (internal_object_p(obj)) {
4706 /* ignore */
4707 }
4708 else if (RBASIC(obj)->klass == 0) {
4709 APPEND_S("(temporary internal)");
4710 }
4711 else if (RTEST(RBASIC(obj)->klass)) {
4712 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4713 if (!NIL_P(class_path)) {
4714 APPEND_F("%s ", RSTRING_PTR(class_path));
4715 }
4716 }
4717 }
4718 end:
4719
4720 return pos;
4721}
4722
4723const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4724
4725static size_t
4726rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4727{
4728 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4729 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4730
4731 switch (type) {
4732 case T_NODE:
4733 UNEXPECTED_NODE(rb_raw_obj_info);
4734 break;
4735 case T_ARRAY:
4736 if (ARY_SHARED_P(obj)) {
4737 APPEND_S("shared -> ");
4738 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4739 }
4740 else {
4741 APPEND_F("[%s%s%s] ",
4742 C(ARY_EMBED_P(obj), "E"),
4743 C(ARY_SHARED_P(obj), "S"),
4744 C(ARY_SHARED_ROOT_P(obj), "R"));
4745
4746 if (ARY_EMBED_P(obj)) {
4747 APPEND_F("len: %ld (embed)",
4748 RARRAY_LEN(obj));
4749 }
4750 else {
4751 APPEND_F("len: %ld, capa:%ld ptr:%p",
4752 RARRAY_LEN(obj),
4753 RARRAY(obj)->as.heap.aux.capa,
4754 (void *)RARRAY_CONST_PTR(obj));
4755 }
4756 }
4757 break;
4758 case T_STRING: {
4759 if (STR_SHARED_P(obj)) {
4760 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4761 }
4762 else {
4763 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4764
4765 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4766 }
4767 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4768 break;
4769 }
4770 case T_SYMBOL: {
4771 VALUE fstr = RSYMBOL(obj)->fstr;
4772 ID id = RSYMBOL(obj)->id;
4773 if (RB_TYPE_P(fstr, T_STRING)) {
4774 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4775 }
4776 else {
4777 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4778 }
4779 break;
4780 }
4781 case T_MOVED: {
4782 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4783 break;
4784 }
4785 case T_HASH: {
4786 APPEND_F("[%c] %"PRIdSIZE,
4787 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4788 RHASH_SIZE(obj));
4789 break;
4790 }
4791 case T_CLASS:
4792 case T_MODULE:
4793 {
4794 VALUE class_path = rb_class_path_cached(obj);
4795 if (!NIL_P(class_path)) {
4796 APPEND_F("%s", RSTRING_PTR(class_path));
4797 }
4798 else {
4799 APPEND_S("(anon)");
4800 }
4801 break;
4802 }
4803 case T_ICLASS:
4804 {
4805 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4806 if (!NIL_P(class_path)) {
4807 APPEND_F("src:%s", RSTRING_PTR(class_path));
4808 }
4809 break;
4810 }
4811 case T_OBJECT:
4812 {
4813 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
4814 if (rb_shape_obj_too_complex_p(obj)) {
4815 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4816 APPEND_F("(too_complex) len:%zu", hash_len);
4817 }
4818 else {
4819 APPEND_F("(embed) len:%d", ROBJECT_FIELDS_CAPACITY(obj));
4820 }
4821 }
4822 else {
4823 APPEND_F("len:%d ptr:%p", ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
4824 }
4825 }
4826 break;
4827 case T_DATA: {
4828 const struct rb_block *block;
4829 const rb_iseq_t *iseq;
4830 if (rb_obj_is_proc(obj) &&
4831 (block = vm_proc_block(obj)) != NULL &&
4832 (vm_block_type(block) == block_type_iseq) &&
4833 (iseq = vm_block_iseq(block)) != NULL) {
4834 rb_raw_iseq_info(BUFF_ARGS, iseq);
4835 }
4836 else if (rb_ractor_p(obj)) {
4837 rb_ractor_t *r = (void *)DATA_PTR(obj);
4838 if (r) {
4839 APPEND_F("r:%d", r->pub.id);
4840 }
4841 }
4842 else {
4843 const char * const type_name = rb_objspace_data_type_name(obj);
4844 if (type_name) {
4845 APPEND_F("%s", type_name);
4846 }
4847 }
4848 break;
4849 }
4850 case T_IMEMO: {
4851 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4852
4853 switch (imemo_type(obj)) {
4854 case imemo_ment:
4855 {
4856 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4857
4858 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4859 rb_id2name(me->called_id),
4860 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4861 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4862 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4863 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4864 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4865 me->def ? rb_method_type_name(me->def->type) : "NULL",
4866 me->def ? me->def->aliased : -1,
4867 (void *)me->owner, // obj_info(me->owner),
4868 (void *)me->defined_class); //obj_info(me->defined_class)));
4869
4870 if (me->def) {
4871 switch (me->def->type) {
4872 case VM_METHOD_TYPE_ISEQ:
4873 APPEND_S(" (iseq:");
4874 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4875 APPEND_S(")");
4876 break;
4877 default:
4878 break;
4879 }
4880 }
4881
4882 break;
4883 }
4884 case imemo_iseq: {
4885 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4886 rb_raw_iseq_info(BUFF_ARGS, iseq);
4887 break;
4888 }
4889 case imemo_callinfo:
4890 {
4891 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4892 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4893 rb_id2name(vm_ci_mid(ci)),
4894 vm_ci_flag(ci),
4895 vm_ci_argc(ci),
4896 vm_ci_kwarg(ci) ? "available" : "NULL");
4897 break;
4898 }
4899 case imemo_callcache:
4900 {
4901 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4902 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
4903 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4904
4905 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4906 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4907 cme ? rb_id2name(cme->called_id) : "<NULL>",
4908 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4909 (void *)cme,
4910 (void *)(uintptr_t)vm_cc_call(cc));
4911 break;
4912 }
4913 default:
4914 break;
4915 }
4916 }
4917 default:
4918 break;
4919 }
4920 }
4921 end:
4922
4923 return pos;
4924}
4925
4926#undef C
4927
4928#ifdef RUBY_ASAN_ENABLED
4929void
4930rb_asan_poison_object(VALUE obj)
4931{
4932 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4933 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4934}
4935
4936void
4937rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4938{
4939 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4940 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4941}
4942
4943void *
4944rb_asan_poisoned_object_p(VALUE obj)
4945{
4946 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4947 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4948}
4949#endif
4950
4951static void
4952raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4953{
4954 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4955 pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4956 if (pos >= buff_size) { /* output may have been truncated */ }
4957}
4958
4959const char *
4960rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4961{
4962 void *objspace = rb_gc_get_objspace();
4963
4964 if (SPECIAL_CONST_P(obj)) {
4965 raw_obj_info(buff, buff_size, obj);
4966 }
4967 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
4968 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
4969 }
4970#if 0 // maybe no need to check it?
4971 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
4972 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
4973 }
4974#endif
4975 else {
4976 asan_unpoisoning_object(obj) {
4977 raw_obj_info(buff, buff_size, obj);
4978 }
4979 }
4980 return buff;
4981}
4982
4983#undef APPEND_S
4984#undef APPEND_F
4985#undef BUFF_ARGS
4986
4987/* Increments *var atomically and resets *var to 0 when maxval is
4988 * reached. Returns the old *var value, wrapped into (0...maxval). */
4989static rb_atomic_t
4990atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
4991{
4992 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
4993 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
4994 const rb_atomic_t newval = oldval + 1;
4995 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
4996 oldval %= maxval;
4997 }
4998 return oldval;
4999}
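
/* A minimal usage sketch (editor's illustration; mirrors obj_info below):
 * picking a slot in a fixed ring of buffers without indexing out of range.
 *
 *     static rb_atomic_t cursor;
 *     static char ring[4][64];
 *     char *slot = ring[atomic_inc_wraparound(&cursor, 4)]; // always 0..3
 */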
5000
5001static const char *
5002obj_info(VALUE obj)
5003{
5004 if (RGENGC_OBJ_INFO) {
5005 static struct {
5006 rb_atomic_t index;
5007 char buffers[10][0x100];
5008 } info = {0};
5009
5010 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5011 char *const buff = info.buffers[index];
5012 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5013 }
5014 return obj_type_name(obj);
5015}
5016
5017/*
5018 ------------------------ Extended allocator ------------------------
5019*/
5020
5021struct gc_raise_tag {
5022 VALUE exc;
5023 const char *fmt;
5024 va_list *ap;
5025};
5026
5027static void *
5028gc_vraise(void *ptr)
5029{
5030 struct gc_raise_tag *argv = ptr;
5031 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5032 UNREACHABLE_RETURN(NULL);
5033}
5034
5035static void
5036gc_raise(VALUE exc, const char *fmt, ...)
5037{
5038 va_list ap;
5039 va_start(ap, fmt);
5040 struct gc_raise_tag argv = {
5041 exc, fmt, &ap,
5042 };
5043
5044 if (ruby_thread_has_gvl_p()) {
5045 gc_vraise(&argv);
5046 UNREACHABLE;
5047 }
5048 else if (ruby_native_thread_p()) {
5049 rb_thread_call_with_gvl(gc_vraise, &argv);
5050 UNREACHABLE;
5051 }
5052 else {
5053 /* Not in a ruby thread */
5054 fprintf(stderr, "%s", "[FATAL] ");
5055 vfprintf(stderr, fmt, ap);
5056 }
5057
5058 va_end(ap);
5059 abort();
5060}
5061
5062NORETURN(static void negative_size_allocation_error(const char *));
5063static void
5064negative_size_allocation_error(const char *msg)
5065{
5066 gc_raise(rb_eNoMemError, "%s", msg);
5067}
5068
5069static void *
5070ruby_memerror_body(void *dummy)
5071{
5072 rb_memerror();
5073 return 0;
5074}
5075
5076NORETURN(static void ruby_memerror(void));
5078static void
5079ruby_memerror(void)
5080{
5081 if (ruby_thread_has_gvl_p()) {
5082 rb_memerror();
5083 }
5084 else {
5085 if (ruby_native_thread_p()) {
5086 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5087 }
5088 else {
5089 /* no ruby thread */
5090 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5091 }
5092 }
5093
5094 /* There have been discussions about whether we should die here; */
5095 /* we may revisit this later. */
5096 exit(EXIT_FAILURE);
5097}
5098
5099void
5100rb_memerror(void)
5101{
5102 /* the `GET_VM()->special_exceptions` below assumes that
5103 * the VM is reachable from the current thread, so assert
5104 * that before anything else. */
5105 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5106
5107 rb_execution_context_t *ec = GET_EC();
5108 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5109
5110 if (!exc ||
5111 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5112 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5113 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5114 exit(EXIT_FAILURE);
5115 }
5116 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5117 rb_ec_raised_clear(ec);
5118 }
5119 else {
5120 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5121 exc = ruby_vm_special_exception_copy(exc);
5122 }
5123 ec->errinfo = exc;
5124 EC_JUMP_TAG(ec, TAG_RAISE);
5125}
5126
5127bool
5128rb_memerror_reentered(void)
5129{
5130 rb_execution_context_t *ec = GET_EC();
5131 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5132}
5133
5134static void *
5135handle_malloc_failure(void *ptr)
5136{
5137 if (LIKELY(ptr)) {
5138 return ptr;
5139 }
5140 else {
5141 ruby_memerror();
5142 UNREACHABLE_RETURN(ptr);
5143 }
5144}
5145
5146static void *ruby_xmalloc_body(size_t size);
5147
5148void *
5149ruby_xmalloc(size_t size)
5150{
5151 return handle_malloc_failure(ruby_xmalloc_body(size));
5152}
5153
5154static bool
5155malloc_gc_allowed(void)
5156{
5157 rb_ractor_t *r = rb_current_ractor_raw(false);
5158
5159 return r == NULL || !r->malloc_gc_disabled;
5160}
5161
5162static void *
5163ruby_xmalloc_body(size_t size)
5164{
5165 if ((ssize_t)size < 0) {
5166 negative_size_allocation_error("too large allocation size");
5167 }
5168
5169 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5170}
5171
5172void
5173ruby_malloc_size_overflow(size_t count, size_t elsize)
5174{
5175 rb_raise(rb_eArgError,
5176 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5177 count, elsize);
5178}
5179
5180void
5181ruby_malloc_add_size_overflow(size_t x, size_t y)
5182{
5183 rb_raise(rb_eArgError,
5184 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5185 x, y);
5186}
5187
5188static void *ruby_xmalloc2_body(size_t n, size_t size);
5189
5190void *
5191ruby_xmalloc2(size_t n, size_t size)
5192{
5193 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5194}
5195
5196static void *
5197ruby_xmalloc2_body(size_t n, size_t size)
5198{
5199 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5200}
5201
5202static void *ruby_xcalloc_body(size_t n, size_t size);
5203
5204void *
5205ruby_xcalloc(size_t n, size_t size)
5206{
5207 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5208}
5209
5210static void *
5211ruby_xcalloc_body(size_t n, size_t size)
5212{
5213 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5214}
5215
5216static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5217
5218#ifdef ruby_sized_xrealloc
5219#undef ruby_sized_xrealloc
5220#endif
5221void *
5222ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5223{
5224 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5225}
5226
5227static void *
5228ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5229{
5230 if ((ssize_t)new_size < 0) {
5231 negative_size_allocation_error("too large allocation size");
5232 }
5233
5234 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5235}
5236
5237void *
5238ruby_xrealloc(void *ptr, size_t new_size)
5239{
5240 return ruby_sized_xrealloc(ptr, new_size, 0);
5241}
5242
5243static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5244
5245#ifdef ruby_sized_xrealloc2
5246#undef ruby_sized_xrealloc2
5247#endif
5248void *
5249ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5250{
5251 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5252}
5253
5254static void *
5255ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5256{
5257 size_t len = xmalloc2_size(n, size);
5258 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5259}
5260
5261void *
5262ruby_xrealloc2(void *ptr, size_t n, size_t size)
5263{
5264 return ruby_sized_xrealloc2(ptr, n, size, 0);
5265}
5266
5267#ifdef ruby_sized_xfree
5268#undef ruby_sized_xfree
5269#endif
5270void
5271ruby_sized_xfree(void *x, size_t size)
5272{
5273 if (LIKELY(x)) {
5274 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5275 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5276 * that case. */
5277 if (LIKELY(GET_VM())) {
5278 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5279 }
5280 else {
5281 ruby_mimfree(x);
5282 }
5283 }
5284}
5285
5286void
5287ruby_xfree(void *x)
5288{
5289 ruby_sized_xfree(x, 0);
5290}
5291
5292void *
5293rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5294{
5295 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5296 return ruby_xmalloc(w);
5297}
5298
5299void *
5300rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5301{
5302 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5303 return ruby_xcalloc(w, 1);
5304}
5305
5306void *
5307rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5308{
5309 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5310 return ruby_xrealloc((void *)p, w);
5311}
5312
5313void *
5314rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5315{
5316 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5317 return ruby_xmalloc(u);
5318}
5319
5320void *
5321rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5322{
5323 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5324 return ruby_xcalloc(u, 1);
5325}
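
/* Worked example (editor's sketch; `struct hdr` is hypothetical): these
 * helpers size header-plus-payload layouts with overflow checking, so
 * n * sizeof(VALUE) + sizeof(struct hdr) cannot silently wrap.
 *
 *     struct hdr { long len; };
 *     struct hdr *p = rb_xmalloc_mul_add(n, sizeof(VALUE), sizeof(struct hdr));
 *     p->len = n;
 */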
5326
5327/* Mimics ruby_xmalloc, but does not require rb_objspace.
5328 * Returns a pointer suitable for ruby_xfree.
5329 */
5330void *
5331ruby_mimmalloc(size_t size)
5332{
5333 void *mem;
5334#if CALC_EXACT_MALLOC_SIZE
5335 size += sizeof(struct malloc_obj_info);
5336#endif
5337 mem = malloc(size);
5338#if CALC_EXACT_MALLOC_SIZE
5339 if (!mem) {
5340 return NULL;
5341 }
5342 else
5343 /* set 0 for consistency of allocated_size/allocations */
5344 {
5345 struct malloc_obj_info *info = mem;
5346 info->size = 0;
5347 mem = info + 1;
5348 }
5349#endif
5350 return mem;
5351}
5352
5353void *
5354ruby_mimcalloc(size_t num, size_t size)
5355{
5356 void *mem;
5357#if CALC_EXACT_MALLOC_SIZE
5358 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5359 if (UNLIKELY(t.left)) {
5360 return NULL;
5361 }
5362 size = t.right + sizeof(struct malloc_obj_info);
5363 mem = calloc1(size);
5364 if (!mem) {
5365 return NULL;
5366 }
5367 else
5368 /* set 0 for consistency of allocated_size/allocations */
5369 {
5370 struct malloc_obj_info *info = mem;
5371 info->size = 0;
5372 mem = info + 1;
5373 }
5374#else
5375 mem = calloc(num, size);
5376#endif
5377 return mem;
5378}
5379
5380void
5381ruby_mimfree(void *ptr)
5382{
5383#if CALC_EXACT_MALLOC_SIZE
5384 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5385 ptr = info;
5386#endif
5387 free(ptr);
5388}
5389
5390void
5391rb_gc_adjust_memory_usage(ssize_t diff)
5392{
5393 unless_objspace(objspace) { return; }
5394
5395 rb_gc_impl_adjust_memory_usage(objspace, diff);
5396}
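
/* A hedged sketch of the intended extension-side use (`buf` is the editor's
 * stand-in for some native buffer): report out-of-band growth so GC pacing
 * can account for memory Ruby did not allocate through an object.
 *
 *     buf->ptr = ruby_xrealloc(buf->ptr, new_capa);
 *     rb_gc_adjust_memory_usage((ssize_t)new_capa - (ssize_t)buf->capa);
 *     buf->capa = new_capa;
 */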
5397
5398const char *
5399rb_obj_info(VALUE obj)
5400{
5401 return obj_info(obj);
5402}
5403
5404void
5405rb_obj_info_dump(VALUE obj)
5406{
5407 char buff[0x100];
5408 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5409}
5410
5411void
5412rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5413{
5414 char buff[0x100];
5415 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5416}
5417
5418void
5419rb_gc_before_fork(void)
5420{
5421 rb_gc_impl_before_fork(rb_gc_get_objspace());
5422}
5423
5424void
5425rb_gc_after_fork(rb_pid_t pid)
5426{
5427 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5428}
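
/* The pairing these hooks assume, sketched for illustration only (the core
 * invokes them around fork(2) itself):
 *
 *     rb_gc_before_fork();
 *     rb_pid_t pid = fork();
 *     rb_gc_after_fork(pid); // runs in parent (pid > 0) and child (pid == 0)
 */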
5429
5430bool
5431rb_gc_obj_shareable_p(VALUE obj)
5432{
5433 return RB_OBJ_SHAREABLE_P(obj);
5434}
5435
5436void
5437rb_gc_rp(VALUE obj)
5438{
5439 rp(obj);
5440}
5441
5442/*
5443 * Document-module: ObjectSpace
5444 *
5445 * The ObjectSpace module contains a number of routines
5446 * that interact with the garbage collection facility and allow you to
5447 * traverse all living objects with an iterator.
5448 *
5449 * ObjectSpace also provides support for object finalizers, procs that are
5450 * called after a specific object has been destroyed by garbage collection.
5451 * See the documentation for +ObjectSpace.define_finalizer+ for important
5452 * information on how to use this method correctly.
5453 *
5454 * a = "A"
5455 * b = "B"
5456 *
5457 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5458 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5459 *
5460 * a = nil
5461 * b = nil
5462 *
5463 * _produces:_
5464 *
5465 * Finalizer two on 537763470
5466 * Finalizer one on 537763480
5467 */
5468
5469/* Document-class: GC::Profiler
5470 *
5471 * The GC profiler provides access to information on GC runs including time,
5472 * length and object space size.
5473 *
5474 * Example:
5475 *
5476 * GC::Profiler.enable
5477 *
5478 * require 'rdoc/rdoc'
5479 *
5480 * GC::Profiler.report
5481 *
5482 * GC::Profiler.disable
5483 *
5484 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5485 */
5486
5487#include "gc.rbinc"
5488
5489void
5490Init_GC(void)
5491{
5492#undef rb_intern
5493 rb_gc_register_address(&id2ref_value);
5494
5495 malloc_offset = gc_compute_malloc_offset();
5496
5497 rb_mGC = rb_define_module("GC");
5498
5499 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5500
5501 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5502
5503 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5504 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5505
5506 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5507
5508 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5509
5510 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5511 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5512
5513 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5514
5515 rb_gc_impl_init();
5516}
5517
5518// Set a name for the anonymous virtual memory area. `addr` is the starting
5519// address of the area and `size` is its length in bytes. `name` is a
5520// NUL-terminated human-readable string.
5521//
5522// This function is usually called after calling `mmap()`. The human-readable
5523// annotation helps developers identify the call site of `mmap()` that created
5524// the memory mapping.
5525//
5526// This function currently only works on Linux 5.17 or higher. After calling
5527// this function, we can see annotations in the form of "[anon:...]" in
5528// `/proc/self/maps`, where `...` is the content of `name`. This function has
5529// no effect when called on other platforms.
5530void
5531ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5532{
5533#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5534 // The name length cannot exceed 80 (including the '\0').
5535 RUBY_ASSERT(strlen(name) < 80);
5536 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5537 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5538 // reasons.
5539 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5540 // 2. addr is an invalid address.
5541 // 3. The string pointed by name is too long.
5542 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5543 // happen if we run the compiled binary on an old kernel. In theory, all
5544 // other errors should result in a failure. But since EINVAL cannot tell
5545 // the first error from others, and this function is mainly used for
5546 // debugging, we silently ignore the error.
5547 errno = 0;
5548#endif
5549}
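
/* A hedged usage sketch (assumes <sys/mman.h>): naming a fresh anonymous
 * mapping so it appears as "[anon:Ruby:example]" in /proc/self/maps on
 * Linux 5.17+.
 *
 *     size_t len = 1 << 20;
 *     void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     if (mem != MAP_FAILED) ruby_annotate_mmap(mem, len, "Ruby:example");
 */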