/* internal/gc.h -- from Ruby 4.1.0dev (revision d5d144c149d3beabbfb262e3994f60552469181b) */
1#ifndef INTERNAL_GC_H /*-*-C-*-vi:se ft=c:*/
2#define INTERNAL_GC_H
11#include "ruby/internal/config.h"
12
13#include <stddef.h> /* for size_t */
14
15#include "internal/compilers.h" /* for __has_attribute */
16#include "ruby/ruby.h" /* for rb_event_flag_t */
17#include "vm_core.h" /* for GET_EC() */
18
19#ifndef USE_MODULAR_GC
20# define USE_MODULAR_GC 0
21#endif
22
/* SET_MACHINE_STACK_END(p): store the current machine stack pointer into *p.
 * Implemented with inline asm on architectures we know; otherwise falls back
 * to a non-inlined C function (NOINLINE so its frame address is usable as a
 * conservative stack-end estimate). */
#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
#elif defined(__i386) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
#elif defined(__aarch64__) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
#else
NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
#define USE_CONSERVATIVE_STACK_END
#endif
40
41/* for GC debug */
42
43#ifndef RUBY_MARK_FREE_DEBUG
44#define RUBY_MARK_FREE_DEBUG 0
45#endif
46
47#if RUBY_MARK_FREE_DEBUG
48extern int ruby_gc_debug_indent;
49
50static inline void
51rb_gc_debug_indent(void)
52{
53 ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
54}
55
56static inline void
57rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
58{
59 if (st == 0) {
60 ruby_gc_debug_indent--;
61 }
62 rb_gc_debug_indent();
63 ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);
64
65 if (st) {
66 ruby_gc_debug_indent++;
67 }
68
69 fflush(stdout);
70}
71
72#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
73#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
74#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
75#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
76#define RUBY_GC_INFO rb_gc_debug_indent(), ruby_debug_printf
77
78#else
79#define RUBY_MARK_ENTER(msg)
80#define RUBY_MARK_LEAVE(msg)
81#define RUBY_FREE_ENTER(msg)
82#define RUBY_FREE_LEAVE(msg)
83#define RUBY_GC_INFO if(0)printf
84#endif
85
86#if STACK_GROW_DIRECTION > 0
87# define STACK_UPPER(x, a, b) (a)
88#elif STACK_GROW_DIRECTION < 0
89# define STACK_UPPER(x, a, b) (b)
90#else
91RUBY_EXTERN int ruby_stack_grow_direction;
92int ruby_get_stack_grow_direction(volatile VALUE *addr);
93# define stack_growup_p(x) ( \
94 (ruby_stack_grow_direction ? \
95 ruby_stack_grow_direction : \
96 ruby_get_stack_grow_direction(x)) > 0)
97# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
98#endif
99
100/*
101 STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.
102
103 On most normal systems, stacks grow from high address to lower address. In
104 this case, STACK_DIR_UPPER(a, b) will return (b), but on exotic systems where
105 the stack grows UP (from low address to high address), it will return (a).
106*/
107
108#if STACK_GROW_DIRECTION
109#define STACK_GROW_DIR_DETECTION
110#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
111#else
112#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
113#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
114#endif
115#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
116
117const char *rb_obj_info(VALUE obj);
118const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
119
120struct rb_execution_context_struct; /* in vm_core.h */
121struct rb_objspace; /* in vm_core.h */
122
123#define NEWOBJ_OF_WITH_SHAPE(var, T, c, f, shape_id, s, ec) \
124 T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
125 rb_wb_protected_newobj_of((ec ? ec : GET_EC()), (c), (f) & ~FL_WB_PROTECTED, shape_id, s) : \
126 rb_wb_unprotected_newobj_of((c), (f), shape_id, s))
127
128#define NEWOBJ_OF(var, T, c, f, s, ec) NEWOBJ_OF_WITH_SHAPE(var, T, c, f, 0 /* ROOT_SHAPE_ID */, s, ec)
129
130#ifndef RB_GC_OBJECT_METADATA_ENTRY_DEFINED
131# define RB_GC_OBJECT_METADATA_ENTRY_DEFINED
133 ID name;
134 VALUE val;
135};
136#endif
137
/* UNALIGNED_MEMBER_ACCESS(expr) / UNALIGNED_MEMBER_PTR(ptr, mem):
 * evaluate an access to a possibly-packed struct member while suppressing
 * -Waddress-of-packed-member on compilers that emit it (clang with
 * __has_warning, GCC >= 9).  Elsewhere they expand to the plain expression. */
#ifndef USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! (__has_warning("-Waddress-of-packed-member") || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#else
# include "internal/warnings.h"
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    __typeof__(expr) unaligned_member_access_result = (expr); \
    COMPILER_WARNING_POP; \
    unaligned_member_access_result; \
})

# define UNALIGNED_MEMBER_PTR(ptr, mem) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    const volatile void *unaligned_member_ptr_result = &(ptr)->mem; \
    COMPILER_WARNING_POP; \
    (__typeof__((ptr)->mem) *)unaligned_member_ptr_result; \
})
#endif

/* Fallback when the warning-suppressing variant above was not defined. */
#ifndef UNALIGNED_MEMBER_PTR
# define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
#endif
166
/* RB_OBJ_WRITE through a possibly-unaligned/packed slot pointer. */
#define RB_OBJ_WRITE_UNALIGNED(old, slot, young) do { \
    VALUE *_slot = UNALIGNED_MEMBER_ACCESS(slot); \
    RB_OBJ_WRITE(old, _slot, young); \
} while (0)

/* Used in places that could malloc during GC, which can cause the GC to run.
 * We need to temporarily disable the GC to allow the malloc to happen.
 * Allocating memory during GC is a bad idea, so use this only when absolutely
 * necessary.  START declares `_already_disabled`, which END reads — the two
 * must be used as a pair in the same scope. */
#define DURING_GC_COULD_MALLOC_REGION_START() \
    assert(rb_during_gc()); \
    VALUE _already_disabled = rb_gc_disable_no_rest()

#define DURING_GC_COULD_MALLOC_REGION_END() \
    if (_already_disabled == Qfalse) rb_gc_enable()
182
183/* gc.c */
184RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
185RUBY_ATTR_MALLOC void *ruby_mimcalloc(size_t num, size_t size);
186void ruby_mimfree(void *ptr);
187void rb_gc_prepare_heap(void);
188void rb_objspace_set_event_hook(const rb_event_flag_t event);
189VALUE rb_objspace_gc_enable(void *objspace);
190VALUE rb_objspace_gc_disable(void *objspace);
191void ruby_gc_set_params(void);
192void rb_gc_copy_attributes(VALUE dest, VALUE obj);
193size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
194size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
195size_t rb_malloc_grow_capa(size_t current_capacity, size_t type_size);
196RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
197RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
198void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
199RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
200RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
201void rb_gc_obj_id_moved(VALUE obj);
202void rb_gc_register_pinning_obj(VALUE obj);
203
204void *rb_gc_ractor_cache_alloc(rb_ractor_t *ractor);
205void rb_gc_ractor_cache_free(void *cache);
206
207bool rb_gc_size_allocatable_p(size_t size);
208size_t *rb_gc_heap_sizes(void);
209size_t rb_gc_heap_id_for_size(size_t size);
210
211void rb_gc_mark_and_move(VALUE *ptr);
212
213void rb_gc_declare_weak_references(VALUE obj);
214bool rb_gc_handle_weak_references_alive_p(VALUE obj);
215
216void rb_gc_ref_update_table_values_only(st_table *tbl);
217
218void rb_gc_initial_stress_set(VALUE flag);
219
220void rb_gc_before_fork(void);
221void rb_gc_after_fork(rb_pid_t pid);
222
223#define rb_gc_mark_and_move_ptr(ptr) do { \
224 VALUE _obj = (VALUE)*(ptr); \
225 rb_gc_mark_and_move(&_obj); \
226 if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
227} while (0)
228
229RUBY_SYMBOL_EXPORT_BEGIN
230/* exports for objspace module */
231void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
232void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
233int rb_objspace_internal_object_p(VALUE obj);
234int rb_objspace_garbage_object_p(VALUE obj);
235bool rb_gc_pointer_to_heap_p(VALUE obj);
236
237void rb_objspace_each_objects(
238 int (*callback)(void *start, void *end, size_t stride, void *data),
239 void *data);
240
241size_t rb_gc_obj_slot_size(VALUE obj);
242
243VALUE rb_gc_disable_no_rest(void);
244
245#define RB_GC_MAX_NAME_LEN 20
246
247/* gc.c (export) */
248const char *rb_objspace_data_type_name(VALUE obj);
249VALUE rb_wb_protected_newobj_of(struct rb_execution_context_struct *, VALUE, VALUE, uint32_t /* shape_id_t */, size_t);
250VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, uint32_t /* shape_id_t */, size_t);
251size_t rb_obj_memsize_of(VALUE);
252struct rb_gc_object_metadata_entry *rb_gc_object_metadata(VALUE obj);
253void rb_gc_mark_values(long n, const VALUE *values);
254void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
255void rb_gc_update_values(long n, VALUE *values);
256void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
257void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
258void ruby_sized_xfree(void *x, size_t size);
259
260const char *rb_gc_active_gc_name(void);
261int rb_gc_modular_gc_loaded_p(void);
262
263RUBY_SYMBOL_EXPORT_END
264
265static inline VALUE
266rb_obj_atomic_write(
267 VALUE a, VALUE *slot, VALUE b,
269 const char *filename,
271 int line)
272{
273#ifdef RGENGC_LOGGING_WRITE
274 RGENGC_LOGGING_WRITE(a, slot, b, filename, line);
275#endif
276
277 RUBY_ATOMIC_VALUE_SET(*slot, b);
278
279 rb_obj_written(a, RUBY_Qundef /* ignore `oldv' now */, b, filename, line);
280 return a;
281}
282#define RB_OBJ_ATOMIC_WRITE(old, slot, young) \
283 RBIMPL_CAST(rb_obj_atomic_write((VALUE)(old), (VALUE *)(slot), (VALUE)(young), __FILE__, __LINE__))
284
285int rb_ec_stack_check(struct rb_execution_context_struct *ec);
286void rb_gc_writebarrier_remember(VALUE obj);
287const char *rb_obj_info(VALUE obj);
288void ruby_annotate_mmap(const void *addr, unsigned long size, const char *name);
289
290# define SIZED_REALLOC_N(v, T, m, n) \
291 ((v) = (T *)ruby_sized_xrealloc2((void *)(v), (m), sizeof(T), (n)))
292
293# define SIZED_FREE(v) ruby_sized_xfree((void *)(v), sizeof(*(v)))
294# define SIZED_FREE_N(v, n) ruby_sized_xfree((void *)(v), sizeof(*(v)) * (n))
295
296static inline void *
297ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
298{
299 return ruby_sized_xrealloc2(ptr, new_count, element_size, old_count);
300}
301
302void rb_gc_verify_shareable(VALUE);
303bool rb_gc_checking_shareable(void);
304
305#endif /* INTERNAL_GC_H */
/* NOTE(review): Doxygen cross-reference residue from documentation extraction
 * followed the closing include guard here (definitions of
 * RUBY_ATOMIC_VALUE_SET, RUBY_EXTERN, rb_event_flag_t, ID, VALUE, etc. live in
 * ruby/atomic.h, ruby/internal/dllexport.h, ruby/internal/event.h and
 * ruby/internal/value.h); it was not part of the source and has been removed. */