Ruby 4.0.0dev (2025-12-06 revision a7dc53b91c8475323b34d5a332fdb25d190e277d)
gc.c (a7dc53b91c8475323b34d5a332fdb25d190e277d)
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/concurrent_set.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"
#include "zjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave(&lev, file, line);
}

unsigned int
rb_gc_cr_lock(const char *file, int line)
{
    unsigned int lev;
    rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
}

unsigned int
rb_gc_vm_lock_no_barrier(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter_nb(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_nb(&lev, file, line);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

#if USE_MODULAR_GC
void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
                (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
                 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    ASSERT_vm_unlocking();
    rb_ractor_ignore_belonging(true);
    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
    rb_ractor_ignore_belonging(false);
#undef RESTORE_FINALIZER
}
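
/*
 * Callback contract sketch (hypothetical caller; `nth_finalizer` and `list`
 * are illustrative names, not part of this file). The GC implementation
 * supplies one finalizer per index; if finalizer i raises, the tag handler
 * above logs the exception and the loop resumes at i + 1.
 *
 *     static VALUE
 *     nth_finalizer(long i, void *data)
 *     {
 *         VALUE list = (VALUE)data;       // Ruby Array of callables
 *         return rb_ary_entry(list, i);
 *     }
 *
 *     // rb_gc_run_obj_finalizer(rb_obj_id(obj), RARRAY_LEN(list),
 *     //                         nth_finalizer, (void *)list);
 */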

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_ractor_p(obj)) return false;
        if (rb_obj_is_fstring_table(obj)) return false;
        if (rb_obj_is_symbol_table(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_obj_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    RBASIC_SET_SHAPE_ID(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    return (uint32_t)rb_shape_transition_heap(obj, heap_id);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
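
/*
 * Usage sketch (hypothetical struct and updater shown for illustration; not
 * part of this file): a compaction callback rewrites any field whose
 * referent has moved.
 *
 *     struct my_node { VALUE name; struct my_node *next; };
 *
 *     static void
 *     my_node_update_references(void *objspace, struct my_node *node)
 *     {
 *         // If `node->name` was moved by compaction, replace it with the
 *         // object's new address; otherwise this is a no-op.
 *         UPDATE_IF_MOVED(objspace, node->name);
 *     }
 */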

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which
    // result in different ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with
    // jemalloc. But malloc(56) will waste 0B with glibc, but waste 8B with
    // jemalloc. So we try allocating 64, 56 and 48 bytes and select the
    // first offset that doesn't waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both
    // it results in no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif
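
/*
 * Illustrative walk-through (the allocator behavior here is an assumption
 * for the example, not a guarantee): if the active allocator's size classes
 * make malloc(64) waste 8B but malloc(56) waste none, the probe loop above
 * returns 8, and rb_malloc_grow_capa below subtracts that offset from its
 * power-of-two target so requests land exactly on a size class.
 *
 *     size_t offset = gc_compute_malloc_offset(); // 8 in this scenario
 *     void *p = malloc(64 - offset);              // no wasted tail bytes
 */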

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
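
/*
 * Worked example (assumes malloc_offset == 8 and type_size == sizeof(VALUE)
 * == 8 on a 64-bit build): growing from 20 elements gives 20 * 8 = 160B,
 * doubled to 320B, rounded up to the 512B power of two, minus the 8B offset
 * = 504B, i.e. 63 elements, so the backing allocation fills its malloc size
 * class exactly.
 *
 *     size_t capa = rb_malloc_grow_capa(20, sizeof(VALUE)); // 63 here
 */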

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}
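
/*
 * Tag convention sketch: `left` is a sticky overflow flag and `right` the
 * (possibly wrapped) value, so callers branch once at the end instead of
 * checking every intermediate step.
 *
 *     struct rbimpl_size_mul_overflow_tag t =
 *         size_mul_add_overflow(SIZE_MAX, 2, 1);
 *     if (t.left) { ... }  // overflowed: raise or call rb_memerror()
 */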

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
                (prefix_len >= (tail = rb_strlen_lit(lit)) && \
                 memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)
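
    /*
     * Expansion sketch (illustrative): load_modular_gc_func(mark) either
     * resolves the "rb_gc_impl_mark" symbol from the dlopen'd library or
     * falls back to the statically linked implementation:
     *
     *     gc_functions.mark = handle ? dlsym(handle, "rb_gc_impl_mark")
     *                                : rb_gc_impl_mark;
     */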

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_free);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(object_metadata);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}
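
/*
 * Usage sketch (the library name "mmtk" is only an example; any name matching
 * the alphanumeric/dash/underscore check above works, provided
 * librubygc.<name><DLEXT> exists in the modular GC directory):
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby script.rb
 *
 * When RUBY_GC_LIBRARY is unset, every entry falls back to the statically
 * linked rb_gc_impl_* functions from gc/default/default.c.
 */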

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;
    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(VALUE obj)
{
#if RUBY_DEBUG
    // IMEMOs and objects without a class (e.g. the managed ID table) are not traceable
    if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;

    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        const VALUE *iseq_encoded = ISEQ_BODY(cfp->iseq)->iseq_encoded;
        const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size;
        RUBY_ASSERT(cfp->pc >= iseq_encoded, "PC not set when allocating, breaking tracing");
        RUBY_ASSERT(cfp->pc <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
    RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);

    gc_validate_pc(obj);

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc,
             * which could potentially trigger a GC. A lot of code is unsafe
             * against a GC triggering right after an object has been
             * allocated, because it performs initialization for the object
             * and assumes that the GC does not trigger before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    }

#if RGENGC_CHECK_MODE
# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
#  define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
# endif

    memset(
        (void *)(obj + sizeof(struct RBasic)),
        GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
        rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
    );
#endif

    return obj;
}

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, shape_id, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, shape_id, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    if (klass) rb_data_object_check(klass);
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA, ROOT_SHAPE_ID, !dmark, sizeof(struct RTypedData));

    struct RData *data = (struct RData *)obj;
    data->dmark = dmark;
    data->dfree = dfree;
    data->data = datap;

    return obj;
}
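
/*
 * Usage sketch (hypothetical wrapper; `struct my_state` and `my_state_free`
 * are illustrative names, not part of this file):
 *
 *     struct my_state { int fd; };
 *
 *     static void
 *     my_state_free(void *p)
 *     {
 *         struct my_state *s = p;
 *         // close resources here, then release the memory
 *         xfree(s);
 *     }
 *
 *     // VALUE obj = rb_data_object_wrap(klass, ZALLOC(struct my_state),
 *     //                                 NULL, my_state_free);
 */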

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, ROOT_SHAPE_ID, wb_protected, size);

    struct RTypedData *data = (struct RTypedData *)obj;
    data->fields_obj = 0;
    *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
    data->data = datap;

    return obj;
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
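
/*
 * Definition sketch (illustrative rb_data_type_t; the names are hypothetical):
 * combining RUBY_TYPED_EMBEDDABLE with RUBY_TYPED_FREE_IMMEDIATELY lets the
 * zalloc path above embed the struct directly in the object slot when it fits.
 *
 *     static const rb_data_type_t my_state_type = {
 *         .wrap_struct_name = "my_state",
 *         .function = {
 *             .dmark = NULL,
 *             .dfree = RUBY_TYPED_DEFAULT_FREE,
 *             .dsize = NULL,
 *         },
 *         .flags = RUBY_TYPED_EMBEDDABLE | RUBY_TYPED_FREE_IMMEDIATELY |
 *                  RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     // VALUE obj = rb_data_typed_object_zalloc(klass,
 *     //                                         sizeof(struct my_state),
 *     //                                         &my_state_type);
 */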

static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static void
io_fptr_finalize(void *fptr)
{
    rb_io_fptr_finalize((struct rb_io *)fptr);
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

struct classext_foreach_args {
    VALUE klass;
    rb_objspace_t *objspace; // used for update_*
};

static void
classext_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_class_classext_free(args->klass, ext, is_prime);
}

static void
classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_iclass_classext_free(args->klass, ext, is_prime);
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    struct classext_foreach_args args;

    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
                st_free_table(ROBJECT_FIELDS_HASH(obj));
            }
            else {
                xfree(ROBJECT(obj)->as.heap.fields);
                RB_DEBUG_COUNTER_INC(obj_obj_ptr);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        break;
      case T_MODULE:
      case T_CLASS:
#if USE_ZJIT
        rb_zjit_klass_free(obj);
#endif
        args.klass = obj;
        rb_class_classext_foreach(obj, classext_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }
        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        args.klass = obj;

        rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST_RAW(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (obj == rb_mRubyVMFrozenCore)
                return 1;

            if (!RBASIC_CLASS(obj)) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      a = 102.7
 *      b = 95       # Won't be returned
 *      c = 12345678987654321
 *      count = ObjectSpace.each_object(Numeric) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      12345678987654321
 *      102.7
 *      2.71828182845905
 *      3.14159265358979
 *      2.22044604925031e-16
 *      1.7976931348623157e+308
 *      2.2250738585072e-308
 *      Total count: 7
 *
 *  Due to a current known Ractor implementation issue, this method will not yield
 *  Ractor-unshareable objects in multi-Ractor mode (when
 *  <code>Ractor.new</code> has been called within the process at least once).
 *  See https://bugs.ruby-lang.org/issues/19387 for more information.
 *
 *      a = 12345678987654321 # shareable
 *      b = [].freeze # shareable
 *      c = {} # not shareable
 *      ObjectSpace.each_object {|x| x } # yields a, b, and c
 *      Ractor.new {} # enter multi-Ractor mode
 *      ObjectSpace.each_object {|x| x } # does not yield c
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
static VALUE id2ref_value = 0;
static st_table *id2ref_tbl = NULL;

#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
static size_t object_id_counter = 1;
#else
static unsigned long long object_id_counter = 1;
#endif

static inline VALUE
generate_next_object_id(void)
{
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
    // 64bit atomics are available
    return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
#else
    unsigned int lock_lev = RB_GC_VM_LOCK();
    VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
    RB_GC_VM_UNLOCK(lock_lev);
    return id;
#endif
}
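
/*
 * Worked example (on a typical 64-bit build where RUBY_IMMEDIATE_MASK + 1 ==
 * 8): successive calls hand out 8, 16, 24, ... so ids always advance in
 * OBJ_ID_INCREMENT steps, and LAST_OBJECT_ID() reconstructs the most recently
 * issued id from the counter alone.
 */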

void
rb_gc_obj_id_moved(VALUE obj)
{
    if (UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
    }
}

static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_TYPE_P(x, T_BIGNUM)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    return FIX2LONG(rb_hash((VALUE)n));
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

static void gc_mark_tbl_no_pin(st_table *table);

static void
id2ref_tbl_mark(void *data)
{
    st_table *table = (st_table *)data;
    if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
        // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
        rb_mark_set(table);
    }
    // We purposely don't mark values, as they are weak references.
    // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
}

static size_t
id2ref_tbl_memsize(const void *data)
{
    return rb_st_memsize(data);
}

static void
id2ref_tbl_free(void *data)
{
    id2ref_tbl = NULL; // clear global ref
    st_table *table = (st_table *)data;
    st_free_table(table);
}

static const rb_data_type_t id2ref_tbl_type = {
    .wrap_struct_name = "VM/_id2ref_table",
    .function = {
        .dmark = id2ref_tbl_mark,
        .dfree = id2ref_tbl_free,
        .dsize = id2ref_tbl_memsize,
        // dcompact function not required because the table is reference updated
        // in rb_gc_vm_weak_table_foreach
    },
    .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
class_object_id(VALUE klass)
{
    VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
    if (!id) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        id = generate_next_object_id();
        VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
        if (existing_id) {
            id = existing_id;
        }
        else if (RB_UNLIKELY(id2ref_tbl)) {
            st_insert(id2ref_tbl, id, klass);
        }
        RB_GC_VM_UNLOCK(lock_lev);
    }
    return id;
}

static inline VALUE
object_id_get(VALUE obj, shape_id_t shape_id)
{
    VALUE id;
    if (rb_shape_too_complex_p(shape_id)) {
        id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
    }
    else {
        id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
    }

#if RUBY_DEBUG
    if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
        rb_p(obj);
        rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
    }
#endif

    return id;
}

static VALUE
object_id0(VALUE obj)
{
    VALUE id = Qfalse;
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    if (rb_shape_has_object_id(shape_id)) {
        return object_id_get(obj, shape_id);
    }

    shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);

    id = generate_next_object_id();
    rb_obj_field_set(obj, object_id_shape_id, 0, id);

    RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
    RUBY_ASSERT(rb_shape_obj_has_id(obj));

    if (RB_UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
    }
    return id;
}

static VALUE
object_id(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        // With Ruby Box, classes and modules have different fields
        // in different boxes, so we cannot store the object id
        // in fields.
        return class_object_id(obj);
      case T_IMEMO:
        RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
        break;
      default:
        break;
    }

    if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        VALUE id = object_id0(obj);
        RB_GC_VM_UNLOCK(lock_lev);
        return id;
    }

    return object_id0(obj);
}

static void
build_id2ref_i(VALUE obj, void *data)
{
    st_table *id2ref_tbl = (st_table *)data;

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (RCLASS(obj)->object_id) {
            st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
        }
        break;
      case T_IMEMO:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (IMEMO_TYPE_P(obj, imemo_fields) && rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
        }
        break;
      case T_OBJECT:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), obj);
        }
        break;
      default:
        // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
        break;
    }
}

1971static VALUE
1972object_id_to_ref(void *objspace_ptr, VALUE object_id)
1973{
1974 rb_objspace_t *objspace = objspace_ptr;
1975
1976 unsigned int lev = RB_GC_VM_LOCK();
1977
1978 if (!id2ref_tbl) {
1979 rb_gc_vm_barrier(); // stop other ractors
1980
1981        // GC must not trigger while we build the table; otherwise, if we end
1982        // up freeing an object that had an ID, we might try to delete it from
1983        // the table even though it wasn't inserted yet.
1984 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
1985 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
1986
1987        // build_id2ref_i will almost certainly malloc, which could trigger GC
1988        // and sweep objects we just added to the table. Calling rb_gc_disable()
1989        // also saves us from having to handle potentially garbage objects.
1990 bool gc_disabled = RTEST(rb_gc_disable());
1991 {
1992 id2ref_tbl = tmp_id2ref_tbl;
1993 id2ref_value = tmp_id2ref_value;
1994
1995 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
1996 }
1997 if (!gc_disabled) rb_gc_enable();
1998 }
1999
2000 VALUE obj;
2001 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2002
2003 RB_GC_VM_UNLOCK(lev);
2004
2005 if (found) {
2006 return obj;
2007 }
2008
2009 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2010 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2011 }
2012 else {
2013 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2014 }
2015}
2016
2017static inline void
2018obj_free_object_id(VALUE obj)
2019{
2020 VALUE obj_id = 0;
2021 if (RB_UNLIKELY(id2ref_tbl)) {
2022 switch (BUILTIN_TYPE(obj)) {
2023 case T_CLASS:
2024 case T_MODULE:
2025 obj_id = RCLASS(obj)->object_id;
2026 break;
2027 case T_IMEMO:
2028 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2029 return;
2030 }
2031 // fallthrough
2032 case T_OBJECT:
2033 {
2034 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2035 if (rb_shape_has_object_id(shape_id)) {
2036 obj_id = object_id_get(obj, shape_id);
2037 }
2038 break;
2039 }
2040 default:
2041 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2042 return;
2043 }
2044
2045 if (RB_UNLIKELY(obj_id)) {
2046 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2047
2048 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2049                // If the object is a T_IMEMO/fields, then it's possible the actual
2050                // object has already been garbage collected.
2051 if (!RB_TYPE_P(obj, T_IMEMO)) {
2052 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2053 }
2054 }
2055 }
2056 }
2057}
2058
2059void
2060rb_gc_obj_free_vm_weak_references(VALUE obj)
2061{
2062 obj_free_object_id(obj);
2063
2064    if (rb_obj_gen_fields_p(obj)) {
2065        rb_free_generic_ivar(obj);
2066    }
2067
2068 switch (BUILTIN_TYPE(obj)) {
2069 case T_STRING:
2070 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2071 rb_gc_free_fstring(obj);
2072 }
2073 break;
2074 case T_SYMBOL:
2075 rb_gc_free_dsymbol(obj);
2076 break;
2077 case T_IMEMO:
2078 switch (imemo_type(obj)) {
2079 case imemo_callcache: {
2080 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
2081
2082 if (vm_cc_refinement_p(cc)) {
2083 rb_vm_delete_cc_refinement(cc);
2084 }
2085
2086 break;
2087 }
2088 case imemo_callinfo:
2089 rb_vm_ci_free((const struct rb_callinfo *)obj);
2090 break;
2091 case imemo_ment:
2092 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2093 break;
2094 default:
2095 break;
2096 }
2097 break;
2098 default:
2099 break;
2100 }
2101}
2102
2103/*
2104 * call-seq:
2105 * ObjectSpace._id2ref(object_id) -> an_object
2106 *
2107 * Converts an object id to a reference to the object. May not be
2108 * called on an object id passed as a parameter to a finalizer.
2109 *
2110 * s = "I am a string" #=> "I am a string"
2111 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2112 * r == s #=> true
2113 *
2114 *  In multi-ractor mode, if the object is not shareable, it raises
2115 * RangeError.
2116 *
2117 * This method is deprecated and should no longer be used.
2118 */
2119
2120static VALUE
2121id2ref(VALUE objid)
2122{
2123#if SIZEOF_LONG == SIZEOF_VOIDP
2124#define NUM2PTR(x) NUM2ULONG(x)
2125#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2126#define NUM2PTR(x) NUM2ULL(x)
2127#endif
2128 objid = rb_to_int(objid);
2129 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2130 VALUE ptr = NUM2PTR(objid);
2131 if (SPECIAL_CONST_P(ptr)) {
2132 if (ptr == Qtrue) return Qtrue;
2133 if (ptr == Qfalse) return Qfalse;
2134 if (NIL_P(ptr)) return Qnil;
2135 if (FIXNUM_P(ptr)) return ptr;
2136 if (FLONUM_P(ptr)) return ptr;
2137
2138 if (SYMBOL_P(ptr)) {
2139 // Check that the symbol is valid
2140 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2141 return ptr;
2142 }
2143 else {
2144 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2145 }
2146 }
2147
2148 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2149 }
2150 }
2151
2152 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2153 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2154 return obj;
2155 }
2156 else {
2157 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2158 }
2159}
2160
2161/* :nodoc: */
2162static VALUE
2163os_id2ref(VALUE os, VALUE objid)
2164{
2165 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2166 return id2ref(objid);
2167}
2168
2169static VALUE
2170rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2171{
2172 if (SPECIAL_CONST_P(obj)) {
2173#if SIZEOF_LONG == SIZEOF_VOIDP
2174 return LONG2NUM((SIGNED_VALUE)obj);
2175#else
2176 return LL2NUM((SIGNED_VALUE)obj);
2177#endif
2178 }
2179
2180 return get_heap_object_id(obj);
2181}
2182
2183static VALUE
2184nonspecial_obj_id(VALUE obj)
2185{
2186#if SIZEOF_LONG == SIZEOF_VOIDP
2187 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2188#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2189 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2190#else
2191# error not supported
2192#endif
2193}
2194
2195VALUE
2196rb_memory_id(VALUE obj)
2197{
2198 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2199}
2200
2201/*
2202 * Document-method: __id__
2203 * Document-method: object_id
2204 *
2205 * call-seq:
2206 * obj.__id__ -> integer
2207 * obj.object_id -> integer
2208 *
2209 * Returns an integer identifier for +obj+.
2210 *
2211 * The same number will be returned on all calls to +object_id+ for a given
2212 * object, and no two active objects will share an id.
2213 *
2214 *  Note that some objects of builtin classes are reused for optimization.
2215 * This is the case for immediate values and frozen string literals.
2216 *
2217 * BasicObject implements +__id__+, Kernel implements +object_id+.
2218 *
2219 * Immediate values are not passed by reference but are passed by value:
2220 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2221 *
2222 * Object.new.object_id == Object.new.object_id # => false
2223 * (21 * 2).object_id == (21 * 2).object_id # => true
2224 * "hello".object_id == "hello".object_id # => false
2225 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2226 */
2227
2228VALUE
2229rb_obj_id(VALUE obj)
2230{
2231 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2232 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2233 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2234 * any immediates. */
2235 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2236}
2237
2238bool
2239rb_obj_id_p(VALUE obj)
2240{
2241 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2242}
2243
2244/*
2245 * GC implementations should call this function before the GC phase that updates references
2246 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2247 * "W^X" policy and protect the code memory from being modified during execution. This function
2248 * makes the code memory writeable.
2249 */
2250void
2251rb_gc_before_updating_jit_code(void)
2252{
2253#if USE_YJIT
2254 rb_yjit_mark_all_writeable();
2255#endif
2256}
2257
2258/*
2259 * GC implementations should call this function before the GC phase that updates references
2260 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2261 * executable again.
2262 */
2263void
2264rb_gc_after_updating_jit_code(void)
2265{
2266#if USE_YJIT
2267 rb_yjit_mark_all_executable();
2268#endif
2269}
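
/*
 * Illustrative call pattern (a minimal sketch, not a prescribed sequence;
 * the exact GC phase that performs the updates is implementation specific):
 *
 *     rb_gc_before_updating_jit_code();
 *     // ... rewrite VALUEs embedded in JIT-generated machine code ...
 *     rb_gc_after_updating_jit_code();
 */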
2270
2271static void
2272classext_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2273{
2274 size_t *size = (size_t *)arg;
2275 size_t s = 0;
2276
2277 if (RCLASSEXT_M_TBL(ext)) {
2278 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2279 }
2280 if (RCLASSEXT_CVC_TBL(ext)) {
2281 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2282 }
2283 if (RCLASSEXT_CONST_TBL(ext)) {
2284 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2285 }
2286 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2287 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2288 }
2289 if (!prime) {
2290 s += sizeof(rb_classext_t);
2291 }
2292 *size += s;
2293}
2294
2295static void
2296classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2297{
2298 size_t *size = (size_t *)arg;
2299 size_t array_size;
2300 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2301 RUBY_ASSERT(prime);
2302 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2303 *size += array_size * sizeof(VALUE);
2304 }
2305}
2306
2307size_t
2308rb_obj_memsize_of(VALUE obj)
2309{
2310 size_t size = 0;
2311
2312 if (SPECIAL_CONST_P(obj)) {
2313 return 0;
2314 }
2315
2316 switch (BUILTIN_TYPE(obj)) {
2317 case T_OBJECT:
2318 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2319 if (rb_shape_obj_too_complex_p(obj)) {
2320 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2321 }
2322 else {
2323 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2324 }
2325 }
2326 break;
2327 case T_MODULE:
2328 case T_CLASS:
2329 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2330 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2331 break;
2332 case T_ICLASS:
2333 if (RICLASS_OWNS_M_TBL_P(obj)) {
2334 if (RCLASS_M_TBL(obj)) {
2335 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2336 }
2337 }
2338 break;
2339 case T_STRING:
2340 size += rb_str_memsize(obj);
2341 break;
2342 case T_ARRAY:
2343 size += rb_ary_memsize(obj);
2344 break;
2345 case T_HASH:
2346 if (RHASH_ST_TABLE_P(obj)) {
2347 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2348 /* st_table is in the slot */
2349 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2350 }
2351 break;
2352 case T_REGEXP:
2353 if (RREGEXP_PTR(obj)) {
2354 size += onig_memsize(RREGEXP_PTR(obj));
2355 }
2356 break;
2357 case T_DATA:
2358 size += rb_objspace_data_type_memsize(obj);
2359 break;
2360 case T_MATCH:
2361 {
2362 rb_matchext_t *rm = RMATCH_EXT(obj);
2363 size += onig_region_memsize(&rm->regs);
2364 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2365 }
2366 break;
2367 case T_FILE:
2368 if (RFILE(obj)->fptr) {
2369 size += rb_io_memsize(RFILE(obj)->fptr);
2370 }
2371 break;
2372 case T_RATIONAL:
2373 case T_COMPLEX:
2374 break;
2375 case T_IMEMO:
2376 size += rb_imemo_memsize(obj);
2377 break;
2378
2379 case T_FLOAT:
2380 case T_SYMBOL:
2381 break;
2382
2383 case T_BIGNUM:
2384 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2385 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2386 }
2387 break;
2388
2389 case T_NODE:
2390 UNEXPECTED_NODE(obj_memsize_of);
2391 break;
2392
2393 case T_STRUCT:
2394 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2395 RSTRUCT(obj)->as.heap.ptr) {
2396 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2397 }
2398 break;
2399
2400 case T_ZOMBIE:
2401 case T_MOVED:
2402 break;
2403
2404 default:
2405 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2406 BUILTIN_TYPE(obj), (void*)obj);
2407 }
2408
2409 return size + rb_gc_obj_slot_size(obj);
2410}
2411
2412static int
2413set_zero(st_data_t key, st_data_t val, st_data_t arg)
2414{
2415 VALUE k = (VALUE)key;
2416 VALUE hash = (VALUE)arg;
2417 rb_hash_aset(hash, k, INT2FIX(0));
2418 return ST_CONTINUE;
2419}
2420
2421struct count_objects_data {
2422    size_t counts[T_MASK+1];
2423 size_t freed;
2424 size_t total;
2425};
2426
2427static void
2428count_objects_i(VALUE obj, void *d)
2429{
2430 struct count_objects_data *data = (struct count_objects_data *)d;
2431
2432 if (RBASIC(obj)->flags) {
2433 data->counts[BUILTIN_TYPE(obj)]++;
2434 }
2435 else {
2436 data->freed++;
2437 }
2438
2439 data->total++;
2440}
2441
2442/*
2443 * call-seq:
2444 * ObjectSpace.count_objects([result_hash]) -> hash
2445 *
2446 * Counts all objects grouped by type.
2447 *
2448 * It returns a hash, such as:
2449 * {
2450 * :TOTAL=>10000,
2451 * :FREE=>3011,
2452 * :T_OBJECT=>6,
2453 * :T_CLASS=>404,
2454 * # ...
2455 * }
2456 *
2457 *  The contents of the returned hash are implementation specific.
2458 *  They may change in the future.
2459 *
2460 *  Keys starting with +:T_+ count live objects.
2461 *  For example, +:T_ARRAY+ is the number of arrays.
2462 *  +:FREE+ is the number of object slots that are not currently used,
2463 *  and +:TOTAL+ is the sum of the above.
2464 *
2465 * If the optional argument +result_hash+ is given,
2466 * it is overwritten and returned. This is intended to avoid probe effect.
2467 *
2468 * h = {}
2469 * ObjectSpace.count_objects(h)
2470 * puts h
2471 *    # => { :TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ... }
2472 *
2473 * This method is only expected to work on C Ruby.
2474 *
2475 */
2476
2477static VALUE
2478count_objects(int argc, VALUE *argv, VALUE os)
2479{
2480 struct count_objects_data data = { 0 };
2481 VALUE hash = Qnil;
2482 VALUE types[T_MASK + 1];
2483
2484 if (rb_check_arity(argc, 0, 1) == 1) {
2485 hash = argv[0];
2486 if (!RB_TYPE_P(hash, T_HASH))
2487 rb_raise(rb_eTypeError, "non-hash given");
2488 }
2489
2490 for (size_t i = 0; i <= T_MASK; i++) {
2491        // type_sym can allocate an object,
2492        // so we need to create all key symbols in advance
2493        // so as not to disturb the result
2494 types[i] = type_sym(i);
2495 }
2496
2497 // Same as type_sym, we need to create all key symbols in advance
2498 VALUE total = ID2SYM(rb_intern("TOTAL"));
2499 VALUE free = ID2SYM(rb_intern("FREE"));
2500
2501 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2502
2503 if (NIL_P(hash)) {
2504 hash = rb_hash_new();
2505 }
2506 else if (!RHASH_EMPTY_P(hash)) {
2507 rb_hash_stlike_foreach(hash, set_zero, hash);
2508 }
2509 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2510 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2511
2512 for (size_t i = 0; i <= T_MASK; i++) {
2513 if (data.counts[i]) {
2514 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2515 }
2516 }
2517
2518 return hash;
2519}
2520
2521#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2522
2523#define STACK_START (ec->machine.stack_start)
2524#define STACK_END (ec->machine.stack_end)
2525#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2526
2527#if STACK_GROW_DIRECTION < 0
2528# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2529#elif STACK_GROW_DIRECTION > 0
2530# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2531#else
2532# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2533 : (size_t)(STACK_END - STACK_START + 1))
2534#endif
2535#if !STACK_GROW_DIRECTION
2536int ruby_stack_grow_direction;
2537int
2538ruby_get_stack_grow_direction(volatile VALUE *addr)
2539{
2540 VALUE *end;
2541 SET_MACHINE_STACK_END(&end);
2542
2543 if (end > addr) return ruby_stack_grow_direction = 1;
2544 return ruby_stack_grow_direction = -1;
2545}
2546#endif
2547
2548size_t
2549ruby_stack_length(VALUE **p)
2550{
2551 rb_execution_context_t *ec = GET_EC();
2552 SET_STACK_END;
2553 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2554 return STACK_LENGTH;
2555}
2556
2557#define PREVENT_STACK_OVERFLOW 1
2558#ifndef PREVENT_STACK_OVERFLOW
2559#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2560# define PREVENT_STACK_OVERFLOW 1
2561#else
2562# define PREVENT_STACK_OVERFLOW 0
2563#endif
2564#endif
2565#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2566static int
2567stack_check(rb_execution_context_t *ec, int water_mark)
2568{
2569 SET_STACK_END;
2570
2571 size_t length = STACK_LENGTH;
2572 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2573
2574 return length > maximum_length;
2575}
2576#else
2577#define stack_check(ec, water_mark) FALSE
2578#endif
2579
2580#define STACKFRAME_FOR_CALL_CFUNC 2048
2581
2582int
2583rb_ec_stack_check(rb_execution_context_t *ec)
2584{
2585 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2586}
2587
2588int
2589ruby_stack_check(void)
2590{
2591 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2592}
2593
2594/* ==================== Marking ==================== */
2595
2596#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2597 if (!RB_SPECIAL_CONST_P(obj)) { \
2598 rb_vm_t *vm = GET_VM(); \
2599 void *objspace = vm->gc.objspace; \
2600 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2601 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2602 (func)(objspace, (obj_or_ptr)); \
2603 } \
2604 else if (check_obj ? \
2605 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2606 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2607 true) { \
2608 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2609 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2610 vm->gc.mark_func_data = NULL; \
2611 mark_func_data->mark_func((obj), mark_func_data->data); \
2612 vm->gc.mark_func_data = mark_func_data; \
2613 } \
2614 } \
2615} while (0)
2616
2617static inline void
2618gc_mark_internal(VALUE obj)
2619{
2620 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2621}
2622
2623void
2624rb_gc_mark_movable(VALUE obj)
2625{
2626 gc_mark_internal(obj);
2627}
2628
2629void
2630rb_gc_mark_and_move(VALUE *ptr)
2631{
2632 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2633}
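
/*
 * rb_gc_mark_and_move() serves both GC phases through one entry point:
 * during marking it marks *ptr, and during compaction's reference-updating
 * phase it rewrites *ptr with the object's new location. A minimal sketch
 * (the `owner` field is hypothetical):
 *
 *     rb_gc_mark_and_move(&thing->owner);
 */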
2634
2635static inline void
2636gc_mark_and_pin_internal(VALUE obj)
2637{
2638 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2639}
2640
2641void
2642rb_gc_mark(VALUE obj)
2643{
2644 gc_mark_and_pin_internal(obj);
2645}
2646
2647static inline void
2648gc_mark_maybe_internal(VALUE obj)
2649{
2650 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2651}
2652
2653void
2654rb_gc_mark_maybe(VALUE obj)
2655{
2656 gc_mark_maybe_internal(obj);
2657}
2658
2659void
2660rb_gc_mark_weak(VALUE *ptr)
2661{
2662 if (RB_SPECIAL_CONST_P(*ptr)) return;
2663
2664 rb_vm_t *vm = GET_VM();
2665 void *objspace = vm->gc.objspace;
2666 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2667 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2668
2669 rb_gc_impl_mark_weak(objspace, ptr);
2670 }
2671 else {
2672 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2673 }
2674}
2675
2676void
2677rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2678{
2679 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2680}
2681
2682ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2683static void
2684each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2685{
2686 VALUE v;
2687 while (n--) {
2688 v = *x;
2689 cb(v, data);
2690 x++;
2691 }
2692}
2693
2694static void
2695each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2696{
2697 if (end <= start) return;
2698 each_location(start, end - start, cb, data);
2699}
2700
2701static void
2702gc_mark_maybe_each_location(VALUE obj, void *data)
2703{
2704 gc_mark_maybe_internal(obj);
2705}
2706
2707void
2708rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2709{
2710 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2711}
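
/*
 * A minimal usage sketch (`buf` is hypothetical): conservatively mark every
 * slot of a C array, pinning anything that looks like a live object:
 *
 *     VALUE buf[8];
 *     // ... fill buf with VALUEs ...
 *     rb_gc_mark_locations(buf, buf + 8);
 */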
2712
2713void
2714rb_gc_mark_values(long n, const VALUE *values)
2715{
2716 for (long i = 0; i < n; i++) {
2717 gc_mark_internal(values[i]);
2718 }
2719}
2720
2721void
2722rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2723{
2724 for (long i = 0; i < n; i++) {
2725 gc_mark_and_pin_internal(values[i]);
2726 }
2727}
2728
2729static int
2730mark_key(st_data_t key, st_data_t value, st_data_t data)
2731{
2732 gc_mark_and_pin_internal((VALUE)key);
2733
2734 return ST_CONTINUE;
2735}
2736
2737void
2738rb_mark_set(st_table *tbl)
2739{
2740 if (!tbl) return;
2741
2742 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2743}
2744
2745static int
2746mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2747{
2748 gc_mark_internal((VALUE)key);
2749 gc_mark_internal((VALUE)value);
2750
2751 return ST_CONTINUE;
2752}
2753
2754static int
2755pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2756{
2757 gc_mark_and_pin_internal((VALUE)key);
2758 gc_mark_and_pin_internal((VALUE)value);
2759
2760 return ST_CONTINUE;
2761}
2762
2763static int
2764pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2765{
2766 gc_mark_and_pin_internal((VALUE)key);
2767 gc_mark_internal((VALUE)value);
2768
2769 return ST_CONTINUE;
2770}
2771
2772static void
2773mark_hash(VALUE hash)
2774{
2775 if (rb_hash_compare_by_id_p(hash)) {
2776 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2777 }
2778 else {
2779 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2780 }
2781
2782 gc_mark_internal(RHASH(hash)->ifnone);
2783}
2784
2785void
2786rb_mark_hash(st_table *tbl)
2787{
2788 if (!tbl) return;
2789
2790 st_foreach(tbl, pin_key_pin_value, 0);
2791}
2792
2793static enum rb_id_table_iterator_result
2794mark_method_entry_i(VALUE me, void *objspace)
2795{
2796 gc_mark_internal(me);
2797
2798 return ID_TABLE_CONTINUE;
2799}
2800
2801static void
2802mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2803{
2804 if (tbl) {
2805 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2806 }
2807}
2808
2809static enum rb_id_table_iterator_result
2810mark_const_entry_i(VALUE value, void *objspace)
2811{
2812 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2813
2814 if (!rb_gc_checking_shareable()) {
2815 gc_mark_internal(ce->value);
2816 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
2817 }
2818 return ID_TABLE_CONTINUE;
2819}
2820
2821static void
2822mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2823{
2824 if (!tbl) return;
2825 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2826}
2827
2828static enum rb_id_table_iterator_result
2829mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2830{
2831 struct rb_cvar_class_tbl_entry *entry;
2832
2833 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2834
2835 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2836 gc_mark_internal((VALUE)entry->cref);
2837
2838 return ID_TABLE_CONTINUE;
2839}
2840
2841static void
2842mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2843{
2844 if (!tbl) return;
2845 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2846}
2847
2848#if STACK_GROW_DIRECTION < 0
2849#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2850#elif STACK_GROW_DIRECTION > 0
2851#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2852#else
2853#define GET_STACK_BOUNDS(start, end, appendix) \
2854 ((STACK_END < STACK_START) ? \
2855 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2856#endif
2857
2858static void
2859gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2860{
2861 gc_mark_maybe_internal(obj);
2862
2863#ifdef RUBY_ASAN_ENABLED
2864 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2865 void *fake_frame_start;
2866 void *fake_frame_end;
2867 bool is_fake_frame = asan_get_fake_stack_extents(
2868 ec->machine.asan_fake_stack_handle, obj,
2869 ec->machine.stack_start, ec->machine.stack_end,
2870 &fake_frame_start, &fake_frame_end
2871 );
2872 if (is_fake_frame) {
2873 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2874 }
2875#endif
2876}
2877
2878static VALUE
2879gc_location_internal(void *objspace, VALUE value)
2880{
2881 if (SPECIAL_CONST_P(value)) {
2882 return value;
2883 }
2884
2885 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2886
2887 return rb_gc_impl_location(objspace, value);
2888}
2889
2890VALUE
2891rb_gc_location(VALUE value)
2892{
2893 return gc_location_internal(rb_gc_get_objspace(), value);
2894}
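
/*
 * How rb_gc_mark_movable() and rb_gc_location() pair up in a TypedData
 * object (a minimal sketch; `struct foo` and its callbacks are hypothetical):
 *
 *     struct foo { VALUE child; };
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         struct foo *foo = ptr;
 *         rb_gc_mark_movable(foo->child); // mark without pinning
 *     }
 *
 *     static void
 *     foo_compact(void *ptr)
 *     {
 *         struct foo *foo = ptr;
 *         foo->child = rb_gc_location(foo->child); // fetch post-move address
 *     }
 */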
2895
2896#if defined(__wasm__)
2897
2898
2899static VALUE *rb_stack_range_tmp[2];
2900
2901static void
2902rb_mark_locations(void *begin, void *end)
2903{
2904 rb_stack_range_tmp[0] = begin;
2905 rb_stack_range_tmp[1] = end;
2906}
2907
2908void
2909rb_gc_save_machine_context(void)
2910{
2911 // no-op
2912}
2913
2914# if defined(__EMSCRIPTEN__)
2915
2916static void
2917mark_current_machine_context(const rb_execution_context_t *ec)
2918{
2919 emscripten_scan_stack(rb_mark_locations);
2920 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2921
2922 emscripten_scan_registers(rb_mark_locations);
2923 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2924}
2925# else // use Asyncify version
2926
2927static void
2928mark_current_machine_context(rb_execution_context_t *ec)
2929{
2930 VALUE *stack_start, *stack_end;
2931 SET_STACK_END;
2932 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2933 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2934
2935 rb_wasm_scan_locals(rb_mark_locations);
2936 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2937}
2938
2939# endif
2940
2941#else // !defined(__wasm__)
2942
2943void
2944rb_gc_save_machine_context(void)
2945{
2946 rb_thread_t *thread = GET_THREAD();
2947
2948 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2949}
2950
2951
2952static void
2953mark_current_machine_context(const rb_execution_context_t *ec)
2954{
2955 rb_gc_mark_machine_context(ec);
2956}
2957#endif
2958
2959void
2960rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2961{
2962 VALUE *stack_start, *stack_end;
2963
2964 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2965 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2966
2967 void *data =
2968#ifdef RUBY_ASAN_ENABLED
2969        /* gc_mark_machine_stack_location_maybe() uses data as const */
2970        (rb_execution_context_t *)ec;
2971#else
2972 NULL;
2973#endif
2974
2975 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2976 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2977 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2978}
2979
2980static int
2981rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2982{
2983 gc_mark_and_pin_internal((VALUE)value);
2984
2985 return ST_CONTINUE;
2986}
2987
2988void
2989rb_mark_tbl(st_table *tbl)
2990{
2991 if (!tbl || tbl->num_entries == 0) return;
2992
2993 st_foreach(tbl, rb_mark_tbl_i, 0);
2994}
2995
2996static void
2997gc_mark_tbl_no_pin(st_table *tbl)
2998{
2999 if (!tbl || tbl->num_entries == 0) return;
3000
3001 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3002}
3003
3004void
3005rb_mark_tbl_no_pin(st_table *tbl)
3006{
3007 gc_mark_tbl_no_pin(tbl);
3008}
3009
3010static bool
3011gc_declarative_marking_p(const rb_data_type_t *type)
3012{
3013 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3014}
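
/*
 * With RUBY_TYPED_DECL_MARKING set, dmark is reinterpreted as a
 * RUBY_REF_END-terminated list of offsets of VALUE fields in the wrapped
 * struct, consumed by rb_gc_mark_children() below. A minimal sketch
 * (`struct foo` and its reference list are hypothetical):
 *
 *     struct foo { long len; VALUE name; VALUE owner; };
 *
 *     static const size_t foo_refs[] = {
 *         offsetof(struct foo, name),
 *         offsetof(struct foo, owner),
 *         RUBY_REF_END,
 *     };
 *
 *     static const rb_data_type_t foo_type = {
 *         .wrap_struct_name = "foo",
 *         .function = { .dmark = (RUBY_DATA_FUNC)(uintptr_t)foo_refs },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */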
3015
3016void
3017rb_gc_mark_roots(void *objspace, const char **categoryp)
3018{
3019 rb_execution_context_t *ec = GET_EC();
3020 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3021
3022#define MARK_CHECKPOINT(category) do { \
3023 if (categoryp) *categoryp = category; \
3024} while (0)
3025
3026 MARK_CHECKPOINT("vm");
3027 rb_vm_mark(vm);
3028
3029 MARK_CHECKPOINT("end_proc");
3030 rb_mark_end_proc();
3031
3032 MARK_CHECKPOINT("global_tbl");
3033 rb_gc_mark_global_tbl();
3034
3035#if USE_YJIT
3036 void rb_yjit_root_mark(void); // in Rust
3037
3038 if (rb_yjit_enabled_p) {
3039 MARK_CHECKPOINT("YJIT");
3040 rb_yjit_root_mark();
3041 }
3042#endif
3043
3044#if USE_ZJIT
3045 void rb_zjit_root_mark(void);
3046 if (rb_zjit_enabled_p) {
3047 MARK_CHECKPOINT("ZJIT");
3048 rb_zjit_root_mark();
3049 }
3050#endif
3051
3052 MARK_CHECKPOINT("machine_context");
3053 mark_current_machine_context(ec);
3054
3055 MARK_CHECKPOINT("global_symbols");
3056 rb_sym_global_symbols_mark_and_move();
3057
3058 MARK_CHECKPOINT("finish");
3059
3060#undef MARK_CHECKPOINT
3061}
3062
3063struct gc_mark_classext_foreach_arg {
3064    rb_objspace_t *objspace;
3065    VALUE obj;
3066};
3067
3068static void
3069gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3070{
3071    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3072    rb_objspace_t *objspace = foreach_arg->objspace;
3073
3074 if (RCLASSEXT_SUPER(ext)) {
3075 gc_mark_internal(RCLASSEXT_SUPER(ext));
3076 }
3077 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3078
3079 if (!rb_gc_checking_shareable()) {
3080 // unshareable
3081 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3082 }
3083
3084 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3085 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3086 }
3087 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3088 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3089 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3090 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3091}
3092
3093static void
3094gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3095{
3096    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3097    rb_objspace_t *objspace = foreach_arg->objspace;
3098
3099 if (RCLASSEXT_SUPER(ext)) {
3100 gc_mark_internal(RCLASSEXT_SUPER(ext));
3101 }
3102 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3103 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3104 }
3105 if (RCLASSEXT_INCLUDER(ext)) {
3106 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3107 }
3108 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3109 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3110}
3111
3112#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3113
3114void
3115rb_gc_mark_children(void *objspace, VALUE obj)
3116{
3117 struct gc_mark_classext_foreach_arg foreach_args;
3118
3119 if (rb_obj_gen_fields_p(obj)) {
3120 rb_mark_generic_ivar(obj);
3121 }
3122
3123 switch (BUILTIN_TYPE(obj)) {
3124 case T_FLOAT:
3125 case T_BIGNUM:
3126 return;
3127
3128 case T_NIL:
3129 case T_FIXNUM:
3130 rb_bug("rb_gc_mark() called for broken object");
3131 break;
3132
3133 case T_NODE:
3134 UNEXPECTED_NODE(rb_gc_mark);
3135 break;
3136
3137 case T_IMEMO:
3138 rb_imemo_mark_and_move(obj, false);
3139 return;
3140
3141 default:
3142 break;
3143 }
3144
3145 gc_mark_internal(RBASIC(obj)->klass);
3146
3147 switch (BUILTIN_TYPE(obj)) {
3148 case T_CLASS:
3149 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3150 !rb_gc_checking_shareable()) {
3151 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3152 }
3153 // Continue to the shared T_CLASS/T_MODULE
3154 case T_MODULE:
3155 foreach_args.objspace = objspace;
3156 foreach_args.obj = obj;
3157 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3158 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3159 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3160 }
3161 break;
3162
3163 case T_ICLASS:
3164 foreach_args.objspace = objspace;
3165 foreach_args.obj = obj;
3166 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3167 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3168 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3169 }
3170 break;
3171
3172 case T_ARRAY:
3173 if (ARY_SHARED_P(obj)) {
3174 VALUE root = ARY_SHARED_ROOT(obj);
3175 gc_mark_internal(root);
3176 }
3177 else {
3178 long len = RARRAY_LEN(obj);
3179 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3180 for (long i = 0; i < len; i++) {
3181 gc_mark_internal(ptr[i]);
3182 }
3183 }
3184 break;
3185
3186 case T_HASH:
3187 mark_hash(obj);
3188 break;
3189
3190 case T_SYMBOL:
3191 gc_mark_internal(RSYMBOL(obj)->fstr);
3192 break;
3193
3194 case T_STRING:
3195 if (STR_SHARED_P(obj)) {
3196 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3197 /* Embedded shared strings cannot be moved because this string
3198 * points into the slot of the shared string. There may be code
3199 * using the RSTRING_PTR on the stack, which would pin this
3200 * string but not pin the shared string, causing it to move. */
3201 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3202 }
3203 else {
3204 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3205 }
3206 }
3207 break;
3208
3209 case T_DATA: {
3210 bool typed_data = RTYPEDDATA_P(obj);
3211 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3212
3213 if (typed_data) {
3214 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3215 }
3216
3217 if (ptr) {
3218 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3219 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3220
3221 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3222 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3223 }
3224 }
3225 else {
3226                RUBY_DATA_FUNC mark_func = typed_data ?
3227                    RTYPEDDATA_TYPE(obj)->function.dmark :
3228                    RDATA(obj)->dmark;
3229 if (mark_func) (*mark_func)(ptr);
3230 }
3231 }
3232
3233 break;
3234 }
3235
3236 case T_OBJECT: {
3237 uint32_t len;
3238 if (rb_shape_obj_too_complex_p(obj)) {
3239 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3240 len = ROBJECT_FIELDS_COUNT_COMPLEX(obj);
3241 }
3242 else {
3243 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3244
3245 len = ROBJECT_FIELDS_COUNT_NOT_COMPLEX(obj);
3246 for (uint32_t i = 0; i < len; i++) {
3247 gc_mark_internal(ptr[i]);
3248 }
3249 }
3250
3251 attr_index_t fields_count = (attr_index_t)len;
3252 if (fields_count) {
3253 VALUE klass = RBASIC_CLASS(obj);
3254
3255 // Increment max_iv_count if applicable, used to determine size pool allocation
3256 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3257 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3258 }
3259 }
3260
3261 break;
3262 }
3263
3264 case T_FILE:
3265 if (RFILE(obj)->fptr) {
3266 gc_mark_internal(RFILE(obj)->fptr->self);
3267 gc_mark_internal(RFILE(obj)->fptr->pathv);
3268 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3269 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3270 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3271 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3272 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3273 gc_mark_internal(RFILE(obj)->fptr->timeout);
3274 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3275 }
3276 break;
3277
3278 case T_REGEXP:
3279 gc_mark_internal(RREGEXP(obj)->src);
3280 break;
3281
3282 case T_MATCH:
3283 gc_mark_internal(RMATCH(obj)->regexp);
3284 if (RMATCH(obj)->str) {
3285 gc_mark_internal(RMATCH(obj)->str);
3286 }
3287 break;
3288
3289 case T_RATIONAL:
3290 gc_mark_internal(RRATIONAL(obj)->num);
3291 gc_mark_internal(RRATIONAL(obj)->den);
3292 break;
3293
3294 case T_COMPLEX:
3295 gc_mark_internal(RCOMPLEX(obj)->real);
3296 gc_mark_internal(RCOMPLEX(obj)->imag);
3297 break;
3298
3299 case T_STRUCT: {
3300 const long len = RSTRUCT_LEN(obj);
3301 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3302
3303 for (long i = 0; i < len; i++) {
3304 gc_mark_internal(ptr[i]);
3305 }
3306
3307 if (rb_shape_obj_has_fields(obj) && !FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3308 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3309 }
3310
3311 break;
3312 }
3313
3314 default:
3315 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3316 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3317 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3318 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3319 BUILTIN_TYPE(obj), (void *)obj,
3320 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3321 }
3322}
3323
3324size_t
3325rb_gc_obj_optimal_size(VALUE obj)
3326{
3327 switch (BUILTIN_TYPE(obj)) {
3328 case T_ARRAY:
3329 {
3330 size_t size = rb_ary_size_as_embedded(obj);
3331 if (rb_gc_size_allocatable_p(size)) {
3332 return size;
3333 }
3334 else {
3335 return sizeof(struct RArray);
3336 }
3337 }
3338
3339 case T_OBJECT:
3340 if (rb_shape_obj_too_complex_p(obj)) {
3341 return sizeof(struct RObject);
3342 }
3343 else {
3344 size_t size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3345 if (rb_gc_size_allocatable_p(size)) {
3346 return size;
3347 }
3348 else {
3349 return sizeof(struct RObject);
3350 }
3351 }
3352
3353 case T_STRING:
3354 {
3355 size_t size = rb_str_size_as_embedded(obj);
3356 if (rb_gc_size_allocatable_p(size)) {
3357 return size;
3358 }
3359 else {
3360 return sizeof(struct RString);
3361 }
3362 }
3363
3364 case T_HASH:
3365 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3366
3367 default:
3368 return 0;
3369 }
3370}
3371
3372void
3373rb_gc_writebarrier(VALUE a, VALUE b)
3374{
3375 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3376}
3377
3378void
3379rb_gc_writebarrier_unprotect(VALUE obj)
3380{
3381 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3382}
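
/*
 * Extensions rarely call the barrier directly; RB_OBJ_WRITE() pairs the
 * store with the barrier. A minimal sketch (`struct foo` is hypothetical):
 *
 *     struct foo { VALUE child; };
 *
 *     static void
 *     foo_set_child(VALUE self, struct foo *foo, VALUE child)
 *     {
 *         RB_OBJ_WRITE(self, &foo->child, child); // store + write barrier
 *     }
 */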
3383
3384/*
3385 * remember `obj' if needed.
3386 */
3387void
3388rb_gc_writebarrier_remember(VALUE obj)
3389{
3390 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3391}
3392
3393void
3394rb_gc_copy_attributes(VALUE dest, VALUE obj)
3395{
3396 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3397}
3398
3399int
3400rb_gc_modular_gc_loaded_p(void)
3401{
3402#if USE_MODULAR_GC
3403 return rb_gc_functions.modular_gc_loaded_p;
3404#else
3405 return false;
3406#endif
3407}
3408
3409const char *
3410rb_gc_active_gc_name(void)
3411{
3412 const char *gc_name = rb_gc_impl_active_gc_name();
3413
3414 const size_t len = strlen(gc_name);
3415 if (len > RB_GC_MAX_NAME_LEN) {
3416 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3417 RB_GC_MAX_NAME_LEN, len, gc_name);
3418 }
3419
3420 return gc_name;
3421}
3422
3423struct rb_gc_object_metadata_entry *
3424rb_gc_object_metadata(VALUE obj)
3425{
3426 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3427}
3428
3429/* GC */
3430
3431void *
3432rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3433{
3434 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3435}
3436
3437void
3438rb_gc_ractor_cache_free(void *cache)
3439{
3440 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3441}
3442
3443void
3444rb_gc_register_mark_object(VALUE obj)
3445{
3446 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3447 return;
3448
3449 rb_vm_register_global_object(obj);
3450}
3451
3452void
3453rb_gc_register_address(VALUE *addr)
3454{
3455 rb_vm_t *vm = GET_VM();
3456
3457 VALUE obj = *addr;
3458
3459 struct global_object_list *tmp = ALLOC(struct global_object_list);
3460 tmp->next = vm->global_object_list;
3461 tmp->varptr = addr;
3462 vm->global_object_list = tmp;
3463
3464 /*
3465 * Because some C extensions have assignment-then-register bugs,
3466     * we defensively guard `obj` here so that it does not get swept.
3467 */
3468 RB_GC_GUARD(obj);
3469 if (0 && !SPECIAL_CONST_P(obj)) {
3470 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3471 rb_obj_class(obj));
3472 rb_print_backtrace(stderr);
3473 }
3474}
3475
3476void
3477rb_gc_unregister_address(VALUE *addr)
3478{
3479 rb_vm_t *vm = GET_VM();
3480 struct global_object_list *tmp = vm->global_object_list;
3481
3482 if (tmp->varptr == addr) {
3483 vm->global_object_list = tmp->next;
3484 xfree(tmp);
3485 return;
3486 }
3487 while (tmp->next) {
3488 if (tmp->next->varptr == addr) {
3489 struct global_object_list *t = tmp->next;
3490
3491 tmp->next = tmp->next->next;
3492 xfree(t);
3493 break;
3494 }
3495 tmp = tmp->next;
3496 }
3497}
3498
3499void
3500rb_global_variable(VALUE *var)
3501{
3502 rb_gc_register_address(var);
3503}
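
/*
 * Typical registration of a global C-level reference (a sketch; `cache` is
 * hypothetical). The guard above exists precisely so that the common
 * assign-then-register pattern stays safe:
 *
 *     static VALUE cache;
 *
 *     void
 *     Init_foo(void)
 *     {
 *         cache = rb_ary_new();        // assign first,
 *         rb_global_variable(&cache);  // then register the address
 *     }
 */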
3504
3505static VALUE
3506gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3507{
3508 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3509
3510 return Qnil;
3511}
3512
3513/*
3514 * rb_objspace_each_objects() is a special C API to walk through the
3515 * Ruby object space. This C API is difficult to use correctly. To be
3516 * frank, you should not use it unless you have read the source code
3517 * of this function and understand what it does.
3518 *
3519 * 'callback' will be called several times (once per heap page,
3520 * in the current implementation) with:
3521 *   vstart: a pointer to the first living object of the heap_page.
3522 *   vend: a pointer just past the valid heap_page area.
3523 *   stride: the distance to the next VALUE.
3524 *
3525 * If callback() returns non-zero, the iteration will be stopped.
3526 *
3527 * This is sample callback code that iterates over living objects:
3528 *
3529 * static int
3530 * sample_callback(void *vstart, void *vend, int stride, void *data)
3531 * {
3532 * VALUE v = (VALUE)vstart;
3533 * for (; v != (VALUE)vend; v += stride) {
3534 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3535 * // do something with live object 'v'
3536 * }
3537 * }
3538 *     return 0; // continue the iteration
3539 * }
3540 *
3541 * Note: 'vstart' is not the top of the heap_page. It points to the first
3542 * living object, so that at least one object is covered to avoid GC issues.
3543 * This means that you cannot walk through every Ruby object page,
3544 * including freed object pages.
3545 *
3546 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3547 * However, a variable value may be passed as 'stride' for some
3548 * reason in the future, so you must use 'stride' instead of
3549 * a constant value in the iteration.
3550 */
3551void
3552rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3553{
3554 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3555}
3556
3557static void
3558gc_ref_update_array(void *objspace, VALUE v)
3559{
3560 if (ARY_SHARED_P(v)) {
3561 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3562
3563 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3564
3565 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3566 // If the root is embedded and its location has changed
3567 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3568 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3569 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3570 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3571 }
3572 }
3573 else {
3574 long len = RARRAY_LEN(v);
3575
3576 if (len > 0) {
3577 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3578 for (long i = 0; i < len; i++) {
3579 UPDATE_IF_MOVED(objspace, ptr[i]);
3580 }
3581 }
3582
3583 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3584 if (rb_ary_embeddable_p(v)) {
3585 rb_ary_make_embedded(v);
3586 }
3587 }
3588 }
3589}
3590
3591static void
3592gc_ref_update_object(void *objspace, VALUE v)
3593{
3594 VALUE *ptr = ROBJECT_FIELDS(v);
3595
3596 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3597 if (rb_shape_obj_too_complex_p(v)) {
3598 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3599 return;
3600 }
3601
3602 size_t slot_size = rb_gc_obj_slot_size(v);
3603 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3604 if (slot_size >= embed_size) {
3605 // Object can be re-embedded
3606 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3607 FL_UNSET_RAW(v, ROBJECT_HEAP);
3608 xfree(ptr);
3609 ptr = ROBJECT(v)->as.ary;
3610 }
3611 }
3612
3613 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3614 UPDATE_IF_MOVED(objspace, ptr[i]);
3615 }
3616}
3617
3618void
3619rb_gc_ref_update_table_values_only(st_table *tbl)
3620{
3621 gc_ref_update_table_values_only(tbl);
3622}
3623
3624/* Update MOVED references in a VALUE=>VALUE st_table */
3625void
3626rb_gc_update_tbl_refs(st_table *ptr)
3627{
3628 gc_update_table_refs(ptr);
3629}
3630
3631static void
3632gc_ref_update_hash(void *objspace, VALUE v)
3633{
3634 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3635}
3636
3637static void
3638gc_update_values(void *objspace, long n, VALUE *values)
3639{
3640 for (long i = 0; i < n; i++) {
3641 UPDATE_IF_MOVED(objspace, values[i]);
3642 }
3643}
3644
3645void
3646rb_gc_update_values(long n, VALUE *values)
3647{
3648 gc_update_values(rb_gc_get_objspace(), n, values);
3649}
3650
3651static enum rb_id_table_iterator_result
3652check_id_table_move(VALUE value, void *data)
3653{
3654 void *objspace = (void *)data;
3655
3656 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3657 return ID_TABLE_REPLACE;
3658 }
3659
3660 return ID_TABLE_CONTINUE;
3661}
3662
3663void
3664rb_gc_prepare_heap_process_object(VALUE obj)
3665{
3666 switch (BUILTIN_TYPE(obj)) {
3667 case T_STRING:
3668        // Precompute the string coderange. This both saves time for when it will
3669        // eventually be needed, and avoids mutating heap pages after a potential fork.
3670        rb_enc_str_coderange(obj);
3671        break;
3672 default:
3673 break;
3674 }
3675}
3676
3677void
3678rb_gc_prepare_heap(void)
3679{
3680 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3681}
3682
3683size_t
3684rb_gc_heap_id_for_size(size_t size)
3685{
3686 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3687}
3688
3689bool
3690rb_gc_size_allocatable_p(size_t size)
3691{
3692 return rb_gc_impl_size_allocatable_p(size);
3693}
3694
3695static enum rb_id_table_iterator_result
3696update_id_table(VALUE *value, void *data, int existing)
3697{
3698 void *objspace = (void *)data;
3699
3700 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3701 *value = gc_location_internal(objspace, (VALUE)*value);
3702 }
3703
3704 return ID_TABLE_CONTINUE;
3705}
3706
3707static void
3708update_m_tbl(void *objspace, struct rb_id_table *tbl)
3709{
3710 if (tbl) {
3711 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3712 }
3713}
3714
3715static enum rb_id_table_iterator_result
3716update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3717{
3718 struct rb_cvar_class_tbl_entry *entry;
3719
3720 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3721
3722 if (entry->cref) {
3723 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3724 }
3725
3726 entry->class_value = gc_location_internal(objspace, entry->class_value);
3727
3728 return ID_TABLE_CONTINUE;
3729}
3730
3731static void
3732update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3733{
3734 if (!tbl) return;
3735 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3736}
3737
3738static enum rb_id_table_iterator_result
3739update_const_tbl_i(VALUE value, void *objspace)
3740{
3741 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3742
3743 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3744 ce->value = gc_location_internal(objspace, ce->value);
3745 }
3746
3747 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3748 ce->file = gc_location_internal(objspace, ce->file);
3749 }
3750
3751 return ID_TABLE_CONTINUE;
3752}
3753
3754static void
3755update_const_tbl(void *objspace, struct rb_id_table *tbl)
3756{
3757 if (!tbl) return;
3758 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3759}
3760
3761static void
3762update_subclasses(void *objspace, rb_classext_t *ext)
3763{
3764 rb_subclass_entry_t *entry;
3765 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3766 if (!anchor) return;
3767 entry = anchor->head;
3768 while (entry) {
3769 if (entry->klass)
3770 UPDATE_IF_MOVED(objspace, entry->klass);
3771 entry = entry->next;
3772 }
3773}
3774
3775static void
3776update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3777{
3778 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
3779 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
3780 for (size_t i = 0; i < array_size; i++) {
3781 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3782 }
3783 }
3784}
3785
3786static void
3787update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3788{
3789 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3790 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3791 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3792 if (is_iclass) {
3793 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3794 }
3795}
3796
3797static void
3798update_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
3799{
3800 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3801 rb_objspace_t *objspace = args->objspace;
3802
3803 if (RCLASSEXT_SUPER(ext)) {
3804 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3805 }
3806
3807 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3808
3809 UPDATE_IF_MOVED(objspace, ext->fields_obj);
3810 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3811 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3812 }
3813 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3814 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3815 update_superclasses(objspace, ext);
3816 update_subclasses(objspace, ext);
3817
3818 update_classext_values(objspace, ext, false);
3819}
3820
3821static void
3822update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
3823{
3824 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3825 rb_objspace_t *objspace = args->objspace;
3826
3827 if (RCLASSEXT_SUPER(ext)) {
3828 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3829 }
3830 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3831 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3832 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3833 update_subclasses(objspace, ext);
3834
3835 update_classext_values(objspace, ext, true);
3836}
3837
3838struct global_vm_table_foreach_data {
3839    vm_table_foreach_callback_func callback;
3840 vm_table_update_callback_func update_callback;
3841 void *data;
3842 bool weak_only;
3843};
3844
3845static int
3846vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3847{
3848 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3849
3850 int ret = iter_data->callback((VALUE)key, iter_data->data);
3851
3852 if (!iter_data->weak_only) {
3853 if (ret != ST_CONTINUE) return ret;
3854
3855 ret = iter_data->callback((VALUE)value, iter_data->data);
3856 }
3857
3858 return ret;
3859}
3860
3861static int
3862vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3863{
3864 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3865
3866 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3867
3868 if (!iter_data->weak_only) {
3869 if (ret != ST_CONTINUE) return ret;
3870
3871 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3872 }
3873
3874 return ret;
3875}
3876
3877static int
3878vm_weak_table_cc_refinement_foreach(st_data_t key, st_data_t data, int error)
3879{
3880 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3881
3882 return iter_data->callback((VALUE)key, iter_data->data);
3883}
3884
3885static int
3886vm_weak_table_cc_refinement_foreach_update_update(st_data_t *key, st_data_t data, int existing)
3887{
3888 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3889
3890 return iter_data->update_callback((VALUE *)key, iter_data->data);
3891}
3892
3893
3894static int
3895vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
3896{
3897 VALUE sym = *sym_ptr;
3898 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3899
3900 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
3901
3902 int ret = iter_data->callback(sym, iter_data->data);
3903
3904 if (ret == ST_REPLACE) {
3905 ret = iter_data->update_callback(sym_ptr, iter_data->data);
3906 }
3907
3908 return ret;
3909}
3910
3911struct st_table *rb_generic_fields_tbl_get(void);
3912
3913static int
3914vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3915{
3916 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3917
3918 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3919 int ret = iter_data->callback((VALUE)key, iter_data->data);
3920 if (ret != ST_CONTINUE) return ret;
3921 }
3922
3923 return iter_data->callback((VALUE)value, iter_data->data);
3924}
3925
3926static int
3927vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3928{
3929 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3930
3931 iter_data->update_callback((VALUE *)value, iter_data->data);
3932
3933 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
3934 iter_data->update_callback((VALUE *)key, iter_data->data);
3935 }
3936
3937 return ST_CONTINUE;
3938}
3939
3940static int
3941vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
3942{
3943 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3944
3945 int ret = iter_data->callback((VALUE)key, iter_data->data);
3946
3947 VALUE new_value = (VALUE)value;
3948 VALUE new_key = (VALUE)key;
3949
3950 switch (ret) {
3951 case ST_CONTINUE:
3952 break;
3953
3954 case ST_DELETE:
3955 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
3956 return ST_DELETE;
3957
3958 case ST_REPLACE: {
3959 ret = iter_data->update_callback(&new_key, iter_data->data);
3960 if (key != new_key) {
3961 ret = ST_DELETE;
3962 }
3963 break;
3964 }
3965
3966 default:
3967 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
3968 }
3969
3970 if (!iter_data->weak_only) {
3971 int ivar_ret = iter_data->callback(new_value, iter_data->data);
3972 switch (ivar_ret) {
3973 case ST_CONTINUE:
3974 break;
3975
3976 case ST_REPLACE:
3977 iter_data->update_callback(&new_value, iter_data->data);
3978 break;
3979
3980 default:
3981 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
3982 }
3983 }
3984
3985 if (key != new_key || value != new_value) {
3986 DURING_GC_COULD_MALLOC_REGION_START();
3987 {
3988 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
3989 }
3990 DURING_GC_COULD_MALLOC_REGION_END();
3991 }
3992
3993 return ret;
3994}
3995
3996static int
3997vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
3998{
4000 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4001 int retval = iter_data->callback(*str, iter_data->data);
4002
4003 if (retval == ST_REPLACE) {
4004 retval = iter_data->update_callback(str, iter_data->data);
4005 }
4006
4007 if (retval == ST_DELETE) {
4008 FL_UNSET(*str, RSTRING_FSTR);
4009 }
4010
4011 return retval;
4012}
4013
4014void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4015void
4016rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4017 vm_table_update_callback_func update_callback,
4018 void *data,
4019 bool weak_only,
4020 enum rb_gc_vm_weak_tables table)
4021{
4022 rb_vm_t *vm = GET_VM();
4023
4024 struct global_vm_table_foreach_data foreach_data = {
4025 .callback = callback,
4026 .update_callback = update_callback,
4027 .data = data,
4028 .weak_only = weak_only,
4029 };
4030
4031 switch (table) {
4032 case RB_GC_VM_CI_TABLE: {
4033 if (vm->ci_table) {
4034 st_foreach_with_replace(
4035 vm->ci_table,
4036 vm_weak_table_foreach_weak_key,
4037 vm_weak_table_foreach_update_weak_key,
4038 (st_data_t)&foreach_data
4039 );
4040 }
4041 break;
4042 }
4043 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4044 if (vm->overloaded_cme_table) {
4045 st_foreach_with_replace(
4046 vm->overloaded_cme_table,
4047 vm_weak_table_foreach_weak_key,
4048 vm_weak_table_foreach_update_weak_key,
4049 (st_data_t)&foreach_data
4050 );
4051 }
4052 break;
4053 }
4054 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4055 rb_sym_global_symbol_table_foreach_weak_reference(
4056 vm_weak_table_sym_set_foreach,
4057 &foreach_data
4058 );
4059 break;
4060 }
4061 case RB_GC_VM_ID2REF_TABLE: {
4062 if (id2ref_tbl) {
4063 st_foreach_with_replace(
4064 id2ref_tbl,
4065 vm_weak_table_id2ref_foreach,
4066 vm_weak_table_id2ref_foreach_update,
4067 (st_data_t)&foreach_data
4068 );
4069 }
4070 break;
4071 }
4072 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4073 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4074 if (generic_fields_tbl) {
4075 st_foreach(
4076 generic_fields_tbl,
4077 vm_weak_table_gen_fields_foreach,
4078 (st_data_t)&foreach_data
4079 );
4080 }
4081 break;
4082 }
4083 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4084 rb_fstring_foreach_with_replace(
4085 vm_weak_table_frozen_strings_foreach,
4086 &foreach_data
4087 );
4088 break;
4089 }
4090 case RB_GC_VM_CC_REFINEMENT_TABLE: {
4091 if (vm->cc_refinement_table) {
4092 set_foreach_with_replace(
4093 vm->cc_refinement_table,
4094 vm_weak_table_cc_refinement_foreach,
4095 vm_weak_table_cc_refinement_foreach_update_update,
4096 (st_data_t)&foreach_data
4097 );
4098 }
4099 break;
4100 }
4101 case RB_GC_VM_WEAK_TABLE_COUNT:
4102 rb_bug("Unreachable");
4103 default:
4104 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4105 }
4106}
4107
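/* Editorial sketch (not part of gc.c): one plausible way a GC implementation
 * could drive rb_gc_vm_weak_table_foreach() above while sweeping or
 * compacting. my_gc_alive_p(), my_gc_moved_p() and my_gc_new_location() are
 * hypothetical stand-ins for the implementation's liveness and forwarding
 * queries. */
#if 0
static int
my_weak_entry_check(VALUE value, void *data)
{
    if (!my_gc_alive_p(value)) return ST_DELETE;  /* drop the dead reference */
    if (my_gc_moved_p(value)) return ST_REPLACE;  /* request the update callback */
    return ST_CONTINUE;
}

static int
my_weak_entry_update(VALUE *value, void *data)
{
    *value = my_gc_new_location(*value);          /* chase the forwarding address */
    return ST_CONTINUE;
}

static void
my_gc_sweep_weak_tables(void)
{
    for (int i = 0; i < RB_GC_VM_WEAK_TABLE_COUNT; i++) {
        rb_gc_vm_weak_table_foreach(my_weak_entry_check, my_weak_entry_update,
                                    NULL, true, (enum rb_gc_vm_weak_tables)i);
    }
}
#endif
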
4108void
4109rb_gc_update_vm_references(void *objspace)
4110{
4111 rb_execution_context_t *ec = GET_EC();
4112 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4113
4114 rb_vm_update_references(vm);
4115 rb_gc_update_global_tbl();
4116 rb_sym_global_symbols_mark_and_move();
4117
4118#if USE_YJIT
4119 void rb_yjit_root_update_references(void); // in Rust
4120
4121 if (rb_yjit_enabled_p) {
4122 rb_yjit_root_update_references();
4123 }
4124#endif
4125
4126#if USE_ZJIT
4127 void rb_zjit_root_update_references(void); // in Rust
4128
4129 if (rb_zjit_enabled_p) {
4130 rb_zjit_root_update_references();
4131 }
4132#endif
4133}
4134
4135void
4136rb_gc_update_object_references(void *objspace, VALUE obj)
4137{
4138 struct classext_foreach_args args;
4139
4140 switch (BUILTIN_TYPE(obj)) {
4141 case T_CLASS:
4142 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4143 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4144 }
4145        // Fall through to the code path shared with T_MODULE
4146 case T_MODULE:
4147 args.klass = obj;
4148 args.objspace = objspace;
4149 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4150 break;
4151
4152 case T_ICLASS:
4153 args.objspace = objspace;
4154 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4155 break;
4156
4157 case T_IMEMO:
4158 rb_imemo_mark_and_move(obj, true);
4159 return;
4160
4161 case T_NIL:
4162 case T_FIXNUM:
4163 case T_NODE:
4164 case T_MOVED:
4165 case T_NONE:
4166 /* These can't move */
4167 return;
4168
4169 case T_ARRAY:
4170 gc_ref_update_array(objspace, obj);
4171 break;
4172
4173 case T_HASH:
4174 gc_ref_update_hash(objspace, obj);
4175 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4176 break;
4177
4178 case T_STRING:
4179 {
4180 if (STR_SHARED_P(obj)) {
4181 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4182 }
4183
4184        /* If, after the move, the string is not embedded but can fit in
4185         * the slot it's been placed in, then re-embed it. */
4186 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4187 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4188 rb_str_make_embedded(obj);
4189 }
4190 }
4191
4192 break;
4193 }
4194 case T_DATA:
4195 /* Call the compaction callback, if it exists */
4196 {
4197 bool typed_data = RTYPEDDATA_P(obj);
4198 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4199
4200 if (typed_data) {
4201 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4202 }
4203
4204 if (ptr) {
4205 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4206 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4207
4208 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4209 VALUE *ref = (VALUE *)((char *)ptr + offset);
4210 *ref = gc_location_internal(objspace, *ref);
4211 }
4212 }
4213 else if (typed_data) {
4214 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4215 if (compact_func) (*compact_func)(ptr);
4216 }
4217 }
4218 }
4219 break;
4220
4221 case T_OBJECT:
4222 gc_ref_update_object(objspace, obj);
4223 break;
4224
4225 case T_FILE:
4226 if (RFILE(obj)->fptr) {
4227 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4228 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4229 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4230 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4231 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4232 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4233 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4234 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4235 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4236 }
4237 break;
4238 case T_REGEXP:
4239 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4240 break;
4241
4242 case T_SYMBOL:
4243 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4244 break;
4245
4246 case T_FLOAT:
4247 case T_BIGNUM:
4248 break;
4249
4250 case T_MATCH:
4251 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4252
4253 if (RMATCH(obj)->str) {
4254 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4255 }
4256 break;
4257
4258 case T_RATIONAL:
4259 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4260 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4261 break;
4262
4263 case T_COMPLEX:
4264 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4265 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4266
4267 break;
4268
4269 case T_STRUCT:
4270 {
4271 long i, len = RSTRUCT_LEN(obj);
4272 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4273
4274 for (i = 0; i < len; i++) {
4275 UPDATE_IF_MOVED(objspace, ptr[i]);
4276 }
4277
4278 if (RSTRUCT_EMBED_LEN(obj)) {
4279 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4280 UPDATE_IF_MOVED(objspace, ptr[len]);
4281 }
4282 }
4283 else {
4284 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4285 }
4286 }
4287 break;
4288 default:
4289 rb_bug("unreachable");
4290 break;
4291 }
4292
4293 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4294}
4295
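/* Editorial sketch (not part of gc.c): the T_DATA branch above invokes a
 * typed data's dcompact callback so extensions can chase references that the
 * compactor moved. A minimal type holding one movable VALUE might look like
 * this; the struct and all "my_" names are hypothetical. */
#if 0
struct my_wrapper {
    VALUE cached;
};

static void
my_wrapper_mark(void *ptr)
{
    struct my_wrapper *w = ptr;
    rb_gc_mark_movable(w->cached);         /* mark without pinning */
}

static void
my_wrapper_compact(void *ptr)
{
    struct my_wrapper *w = ptr;
    w->cached = rb_gc_location(w->cached); /* fetch the post-move address */
}

static const rb_data_type_t my_wrapper_type = {
    "my_wrapper",
    { my_wrapper_mark, RUBY_TYPED_DEFAULT_FREE, NULL, my_wrapper_compact },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
#endif
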
4296VALUE
4297rb_gc_start(void)
4298{
4299 rb_gc();
4300 return Qnil;
4301}
4302
4303void
4304rb_gc(void)
4305{
4306 unless_objspace(objspace) { return; }
4307
4308 rb_gc_impl_start(objspace, true, true, true, false);
4309}
4310
4311int
4312rb_during_gc(void)
4313{
4314 unless_objspace(objspace) { return FALSE; }
4315
4316 return rb_gc_impl_during_gc_p(objspace);
4317}
4318
4319size_t
4320rb_gc_count(void)
4321{
4322 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4323}
4324
4325static VALUE
4326gc_count(rb_execution_context_t *ec, VALUE self)
4327{
4328 return SIZET2NUM(rb_gc_count());
4329}
4330
4331VALUE
4332rb_gc_latest_gc_info(VALUE key)
4333{
4334 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4335 rb_raise(rb_eTypeError, "non-hash or symbol given");
4336 }
4337
4338 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4339
4340 if (val == Qundef) {
4341 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4342 }
4343
4344 return val;
4345}
4346
4347static VALUE
4348gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4349{
4350 if (NIL_P(arg)) {
4351 arg = rb_hash_new();
4352 }
4353 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4354 rb_raise(rb_eTypeError, "non-hash or symbol given");
4355 }
4356
4357 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4358
4359 if (ret == Qundef) {
4360 GC_ASSERT(SYMBOL_P(arg));
4361
4362 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4363 }
4364
4365 return ret;
4366}
4367
4368size_t
4369rb_gc_stat(VALUE arg)
4370{
4371 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4372 rb_raise(rb_eTypeError, "non-hash or symbol given");
4373 }
4374
4375 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4376
4377 if (ret == Qundef) {
4378 GC_ASSERT(SYMBOL_P(arg));
4379
4380 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4381 }
4382
4383 if (SYMBOL_P(arg)) {
4384 return NUM2SIZET(ret);
4385 }
4386 else {
4387 return 0;
4388 }
4389}
4390
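/* Editorial sketch (not part of gc.c): calling rb_gc_stat() from C. A Symbol
 * argument returns that single counter; a Hash argument is filled in and the
 * function returns 0, as implemented above. */
#if 0
static size_t
example_minor_gc_count(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("minor_gc_count")));
}
#endif
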
4391static VALUE
4392gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4393{
4394 if (NIL_P(arg)) {
4395 arg = rb_hash_new();
4396 }
4397
4398 if (NIL_P(heap_name)) {
4399 if (!RB_TYPE_P(arg, T_HASH)) {
4400 rb_raise(rb_eTypeError, "non-hash given");
4401 }
4402 }
4403 else if (FIXNUM_P(heap_name)) {
4404 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4405 rb_raise(rb_eTypeError, "non-hash or symbol given");
4406 }
4407 }
4408 else {
4409 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4410 }
4411
4412 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4413
4414 if (ret == Qundef) {
4415 GC_ASSERT(SYMBOL_P(arg));
4416
4417 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4418 }
4419
4420 return ret;
4421}
4422
4423static VALUE
4424gc_config_get(rb_execution_context_t *ec, VALUE self)
4425{
4426 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4427 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4428
4429 return cfg_hash;
4430}
4431
4432static VALUE
4433gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4434{
4435 void *objspace = rb_gc_get_objspace();
4436
4437 rb_gc_impl_config_set(objspace, hash);
4438
4439 return Qnil;
4440}
4441
4442static VALUE
4443gc_stress_get(rb_execution_context_t *ec, VALUE self)
4444{
4445 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4446}
4447
4448static VALUE
4449gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4450{
4451 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4452
4453 return flag;
4454}
4455
4456void
4457rb_gc_initial_stress_set(VALUE flag)
4458{
4459 initial_stress = flag;
4460}
4461
4462size_t *
4463rb_gc_heap_sizes(void)
4464{
4465 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4466}
4467
4468VALUE
4469rb_gc_enable(void)
4470{
4471 return rb_objspace_gc_enable(rb_gc_get_objspace());
4472}
4473
4474VALUE
4475rb_objspace_gc_enable(void *objspace)
4476{
4477 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4478 rb_gc_impl_gc_enable(objspace);
4479 return RBOOL(disabled);
4480}
4481
4482static VALUE
4483gc_enable(rb_execution_context_t *ec, VALUE _)
4484{
4485 return rb_gc_enable();
4486}
4487
4488static VALUE
4489gc_disable_no_rest(void *objspace)
4490{
4491 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4492 rb_gc_impl_gc_disable(objspace, false);
4493 return RBOOL(disabled);
4494}
4495
4496VALUE
4497rb_gc_disable_no_rest(void)
4498{
4499 return gc_disable_no_rest(rb_gc_get_objspace());
4500}
4501
4502VALUE
4503rb_gc_disable(void)
4504{
4505 return rb_objspace_gc_disable(rb_gc_get_objspace());
4506}
4507
4508VALUE
4509rb_objspace_gc_disable(void *objspace)
4510{
4511 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4512 rb_gc_impl_gc_disable(objspace, true);
4513 return RBOOL(disabled);
4514}
4515
4516static VALUE
4517gc_disable(rb_execution_context_t *ec, VALUE _)
4518{
4519 return rb_gc_disable();
4520}
4521
4522// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4523void
4524ruby_gc_set_params(void)
4525{
4526 rb_gc_impl_set_params(rb_gc_get_objspace());
4527}
4528
4529void
4530rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4531{
4532 RB_VM_LOCKING() {
4533        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4534
4535 if (!RB_SPECIAL_CONST_P(obj)) {
4536 rb_vm_t *vm = GET_VM();
4537 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4538 struct gc_mark_func_data_struct mfd = {
4539 .mark_func = func,
4540 .data = data,
4541 };
4542
4543 vm->gc.mark_func_data = &mfd;
4544 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4545 vm->gc.mark_func_data = prev_mfd;
4546 }
4547 }
4548}
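
/* Editorial sketch (not part of gc.c): rb_objspace_reachable_objects_from()
 * temporarily swaps in a mark function, so the callback fires once per object
 * directly reachable from obj. Example: dump each direct child. */
#if 0
static void
dump_reachable_i(VALUE child, void *data)
{
    rb_obj_info_dump(child);  /* defined later in this file */
}

static void
dump_reachable_from(VALUE obj)
{
    rb_objspace_reachable_objects_from(obj, dump_reachable_i, NULL);
}
#endif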
4549
4551 const char *category;
4552 void (*func)(const char *category, VALUE, void *);
4553 void *data;
4554};
4555
4556static void
4557root_objects_from(VALUE obj, void *ptr)
4558{
4559 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4560 (*data->func)(data->category, obj, data->data);
4561}
4562
4563void
4564rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4565{
4566    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4567
4568 rb_vm_t *vm = GET_VM();
4569
4570 struct root_objects_data data = {
4571 .func = func,
4572 .data = passing_data,
4573 };
4574
4575 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4576 struct gc_mark_func_data_struct mfd = {
4577 .mark_func = root_objects_from,
4578 .data = &data,
4579 };
4580
4581 vm->gc.mark_func_data = &mfd;
4582 rb_gc_save_machine_context();
4583 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4584 vm->gc.mark_func_data = prev_mfd;
4585}
4586
4587/*
4588 ------------------------------ DEBUG ------------------------------
4589*/
4590
4591static const char *
4592type_name(int type, VALUE obj)
4593{
4594 switch (type) {
4595#define TYPE_NAME(t) case (t): return #t;
4596 TYPE_NAME(T_NONE);
4597 TYPE_NAME(T_OBJECT);
4598 TYPE_NAME(T_CLASS);
4599 TYPE_NAME(T_MODULE);
4600 TYPE_NAME(T_FLOAT);
4601 TYPE_NAME(T_STRING);
4602 TYPE_NAME(T_REGEXP);
4603 TYPE_NAME(T_ARRAY);
4604 TYPE_NAME(T_HASH);
4605 TYPE_NAME(T_STRUCT);
4606 TYPE_NAME(T_BIGNUM);
4607 TYPE_NAME(T_FILE);
4608 TYPE_NAME(T_MATCH);
4609 TYPE_NAME(T_COMPLEX);
4610 TYPE_NAME(T_RATIONAL);
4611 TYPE_NAME(T_NIL);
4612 TYPE_NAME(T_TRUE);
4613 TYPE_NAME(T_FALSE);
4614 TYPE_NAME(T_SYMBOL);
4615 TYPE_NAME(T_FIXNUM);
4616 TYPE_NAME(T_UNDEF);
4617 TYPE_NAME(T_IMEMO);
4618 TYPE_NAME(T_ICLASS);
4619 TYPE_NAME(T_MOVED);
4620 TYPE_NAME(T_ZOMBIE);
4621 case T_DATA:
4622 if (obj && rb_objspace_data_type_name(obj)) {
4623 return rb_objspace_data_type_name(obj);
4624 }
4625 return "T_DATA";
4626#undef TYPE_NAME
4627 }
4628 return "unknown";
4629}
4630
4631static const char *
4632obj_type_name(VALUE obj)
4633{
4634 return type_name(TYPE(obj), obj);
4635}
4636
4637const char *
4638rb_method_type_name(rb_method_type_t type)
4639{
4640 switch (type) {
4641 case VM_METHOD_TYPE_ISEQ: return "iseq";
4642      case VM_METHOD_TYPE_ATTRSET: return "attrset";
4643 case VM_METHOD_TYPE_IVAR: return "ivar";
4644 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4645 case VM_METHOD_TYPE_ALIAS: return "alias";
4646 case VM_METHOD_TYPE_REFINED: return "refined";
4647 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4648 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4649 case VM_METHOD_TYPE_MISSING: return "missing";
4650 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4651 case VM_METHOD_TYPE_UNDEF: return "undef";
4652 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4653 }
4654 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4655}
4656
4657static void
4658rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4659{
4660 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4661 VALUE path = rb_iseq_path(iseq);
4662 int n = ISEQ_BODY(iseq)->location.first_lineno;
4663 snprintf(buff, buff_size, " %s@%s:%d",
4664 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4665 RSTRING_PTR(path), n);
4666 }
4667}
4668
4669static int
4670str_len_no_raise(VALUE str)
4671{
4672 long len = RSTRING_LEN(str);
4673 if (len < 0) return 0;
4674 if (len > INT_MAX) return INT_MAX;
4675 return (int)len;
4676}
4677
4678#define BUFF_ARGS buff + pos, buff_size - pos
4679#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4680#define APPEND_S(s) do { \
4681 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4682 goto end; \
4683 } \
4684 else { \
4685 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4686 } \
4687 } while (0)
4688#define C(c, s) ((c) != 0 ? (s) : " ")
4689
4690static size_t
4691rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4692{
4693 size_t pos = 0;
4694
4695 if (SPECIAL_CONST_P(obj)) {
4696 APPEND_F("%s", obj_type_name(obj));
4697
4698 if (FIXNUM_P(obj)) {
4699 APPEND_F(" %ld", FIX2LONG(obj));
4700 }
4701 else if (SYMBOL_P(obj)) {
4702 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4703 }
4704 }
4705 else {
4706 // const int age = RVALUE_AGE_GET(obj);
4707
4708 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4709 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4710 // TODO: fixme
4711 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4712 // (void *)obj, age,
4713 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4714 // C(RVALUE_MARK_BITMAP(obj), "M"),
4715 // C(RVALUE_PIN_BITMAP(obj), "P"),
4716 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4717 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4718 // C(rb_objspace_garbage_object_p(obj), "G"),
4719 // obj_type_name(obj));
4720 }
4721 else {
4722 /* fake */
4723 // APPEND_F("%p [%dXXXX] %s",
4724 // (void *)obj, age,
4725 // obj_type_name(obj));
4726 }
4727
4728 if (internal_object_p(obj)) {
4729 /* ignore */
4730 }
4731 else if (RBASIC(obj)->klass == 0) {
4732 APPEND_S("(temporary internal)");
4733 }
4734 else if (RTEST(RBASIC(obj)->klass)) {
4735 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4736 if (!NIL_P(class_path)) {
4737 APPEND_F("%s ", RSTRING_PTR(class_path));
4738 }
4739 }
4740 }
4741 end:
4742
4743 return pos;
4744}
4745
4746const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4747
4748static size_t
4749rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4750{
4751 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4752 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4753
4754 switch (type) {
4755 case T_NODE:
4756 UNEXPECTED_NODE(rb_raw_obj_info);
4757 break;
4758 case T_ARRAY:
4759 if (ARY_SHARED_P(obj)) {
4760 APPEND_S("shared -> ");
4761 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4762 }
4763 else {
4764 APPEND_F("[%s%s%s] ",
4765 C(ARY_EMBED_P(obj), "E"),
4766 C(ARY_SHARED_P(obj), "S"),
4767 C(ARY_SHARED_ROOT_P(obj), "R"));
4768
4769 if (ARY_EMBED_P(obj)) {
4770 APPEND_F("len: %ld (embed)",
4771 RARRAY_LEN(obj));
4772 }
4773 else {
4774 APPEND_F("len: %ld, capa:%ld ptr:%p",
4775 RARRAY_LEN(obj),
4776 RARRAY(obj)->as.heap.aux.capa,
4777 (void *)RARRAY_CONST_PTR(obj));
4778 }
4779 }
4780 break;
4781 case T_STRING: {
4782 if (STR_SHARED_P(obj)) {
4783 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4784 }
4785 else {
4786 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4787
4788 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4789 }
4790 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4791 break;
4792 }
4793 case T_SYMBOL: {
4794 VALUE fstr = RSYMBOL(obj)->fstr;
4795 ID id = RSYMBOL(obj)->id;
4796 if (RB_TYPE_P(fstr, T_STRING)) {
4797 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4798 }
4799 else {
4800 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4801 }
4802 break;
4803 }
4804 case T_MOVED: {
4805 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4806 break;
4807 }
4808 case T_HASH: {
4809 APPEND_F("[%c] %"PRIdSIZE,
4810 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4811 RHASH_SIZE(obj));
4812 break;
4813 }
4814 case T_CLASS:
4815 case T_MODULE:
4816 {
4817 VALUE class_path = rb_class_path_cached(obj);
4818 if (!NIL_P(class_path)) {
4819 APPEND_F("%s", RSTRING_PTR(class_path));
4820 }
4821 else {
4822 APPEND_S("(anon)");
4823 }
4824 break;
4825 }
4826 case T_ICLASS:
4827 {
4828 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4829 if (!NIL_P(class_path)) {
4830 APPEND_F("src:%s", RSTRING_PTR(class_path));
4831 }
4832 break;
4833 }
4834 case T_OBJECT:
4835 {
4836 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
4837 if (rb_shape_obj_too_complex_p(obj)) {
4838 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4839 APPEND_F("(too_complex) len:%zu", hash_len);
4840 }
4841 else {
4842 APPEND_F("(embed) len:%d", ROBJECT_FIELDS_CAPACITY(obj));
4843 }
4844 }
4845 else {
4846 APPEND_F("len:%d ptr:%p", ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
4847 }
4848 }
4849 break;
4850 case T_DATA: {
4851 const struct rb_block *block;
4852 const rb_iseq_t *iseq;
4853 if (rb_obj_is_proc(obj) &&
4854 (block = vm_proc_block(obj)) != NULL &&
4855 (vm_block_type(block) == block_type_iseq) &&
4856 (iseq = vm_block_iseq(block)) != NULL) {
4857 rb_raw_iseq_info(BUFF_ARGS, iseq);
4858 }
4859 else if (rb_ractor_p(obj)) {
4860 rb_ractor_t *r = (void *)DATA_PTR(obj);
4861 if (r) {
4862 APPEND_F("r:%d", r->pub.id);
4863 }
4864 }
4865 else {
4866 const char * const type_name = rb_objspace_data_type_name(obj);
4867 if (type_name) {
4868 APPEND_F("%s", type_name);
4869 }
4870 }
4871 break;
4872 }
4873 case T_IMEMO: {
4874 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4875
4876 switch (imemo_type(obj)) {
4877 case imemo_ment:
4878 {
4879 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4880
4881 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4882 rb_id2name(me->called_id),
4883 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4884 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4885 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4886 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4887 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4888 me->def ? rb_method_type_name(me->def->type) : "NULL",
4889 me->def ? me->def->aliased : -1,
4890 (void *)me->owner, // obj_info(me->owner),
4891 (void *)me->defined_class); //obj_info(me->defined_class)));
4892
4893 if (me->def) {
4894 switch (me->def->type) {
4895 case VM_METHOD_TYPE_ISEQ:
4896 APPEND_S(" (iseq:");
4897 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4898 APPEND_S(")");
4899 break;
4900 default:
4901 break;
4902 }
4903 }
4904
4905 break;
4906 }
4907 case imemo_iseq: {
4908 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4909 rb_raw_iseq_info(BUFF_ARGS, iseq);
4910 break;
4911 }
4912 case imemo_callinfo:
4913 {
4914 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4915 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4916 rb_id2name(vm_ci_mid(ci)),
4917 vm_ci_flag(ci),
4918 vm_ci_argc(ci),
4919 vm_ci_kwarg(ci) ? "available" : "NULL");
4920 break;
4921 }
4922 case imemo_callcache:
4923 {
4924 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4925 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
4926 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4927
4928 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4929 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4930 cme ? rb_id2name(cme->called_id) : "<NULL>",
4931 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4932 (void *)cme,
4933 (void *)(uintptr_t)vm_cc_call(cc));
4934 break;
4935 }
4936 default:
4937 break;
4938 }
4939 }
4940 default:
4941 break;
4942 }
4943 }
4944 end:
4945
4946 return pos;
4947}
4948
4949#undef C
4950
4951#ifdef RUBY_ASAN_ENABLED
4952void
4953rb_asan_poison_object(VALUE obj)
4954{
4955 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4956 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4957}
4958
4959void
4960rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4961{
4962 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4963 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4964}
4965
4966void *
4967rb_asan_poisoned_object_p(VALUE obj)
4968{
4969 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4970 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4971}
4972#endif
4973
4974static void
4975raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4976{
4977 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4978    pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4979 if (pos >= buff_size) {} // truncated
4980}
4981
4982const char *
4983rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4984{
4985 void *objspace = rb_gc_get_objspace();
4986
4987 if (SPECIAL_CONST_P(obj)) {
4988 raw_obj_info(buff, buff_size, obj);
4989 }
4990 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
4991 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
4992 }
4993#if 0 // maybe no need to check it?
4994 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
4995 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
4996 }
4997#endif
4998 else {
4999 asan_unpoisoning_object(obj) {
5000 raw_obj_info(buff, buff_size, obj);
5001 }
5002 }
5003 return buff;
5004}
5005
5006#undef APPEND_S
5007#undef APPEND_F
5008#undef BUFF_ARGS
5009
5010/* Increments *var atomically and resets *var to 0 when maxval is
5011 * reached. Returns the old *var value, wrapped around into (0...maxval). */
5012static rb_atomic_t
5013atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5014{
5015 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5016 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5017 const rb_atomic_t newval = oldval + 1;
5018 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5019 oldval %= maxval;
5020 }
5021 return oldval;
5022}
5023
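/* Editorial sketch (not part of gc.c): with maxval == 3, the function above
 * yields 0, 1, 2, 0, 1, 2, ... even with concurrent callers, which makes it
 * suitable for rotating through a small ring of scratch buffers, as
 * obj_info() does below. */
#if 0
static char *
example_next_scratch_buffer(void)
{
    static char buffers[3][64];
    static rb_atomic_t cursor;
    return buffers[atomic_inc_wraparound(&cursor, 3)];
}
#endif
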
5024static const char *
5025obj_info(VALUE obj)
5026{
5027 if (RGENGC_OBJ_INFO) {
5028 static struct {
5029 rb_atomic_t index;
5030 char buffers[10][0x100];
5031 } info = {0};
5032
5033 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5034 char *const buff = info.buffers[index];
5035 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5036 }
5037 return obj_type_name(obj);
5038}
5039
5040/*
5041 ------------------------ Extended allocator ------------------------
5042*/
5043
5044struct gc_raise_tag {
5045    VALUE exc;
5046 const char *fmt;
5047 va_list *ap;
5048};
5049
5050static void *
5051gc_vraise(void *ptr)
5052{
5053 struct gc_raise_tag *argv = ptr;
5054 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5055 UNREACHABLE_RETURN(NULL);
5056}
5057
5058static void
5059gc_raise(VALUE exc, const char *fmt, ...)
5060{
5061 va_list ap;
5062 va_start(ap, fmt);
5063 struct gc_raise_tag argv = {
5064 exc, fmt, &ap,
5065 };
5066
5067 if (ruby_native_thread_p()) {
5068 rb_thread_call_with_gvl(gc_vraise, &argv);
5069        UNREACHABLE;
5070    }
5071 else {
5072 /* Not in a ruby thread */
5073 fprintf(stderr, "%s", "[FATAL] ");
5074 vfprintf(stderr, fmt, ap);
5075 }
5076
5077 va_end(ap);
5078 abort();
5079}
5080
5081NORETURN(static void negative_size_allocation_error(const char *));
5082static void
5083negative_size_allocation_error(const char *msg)
5084{
5085 gc_raise(rb_eNoMemError, "%s", msg);
5086}
5087
5088static void *
5089ruby_memerror_body(void *dummy)
5090{
5091 rb_memerror();
5092 return 0;
5093}
5094
5095NORETURN(static void ruby_memerror(void));
5096RBIMPL_ATTR_MAYBE_UNUSED()
5097static void
5098ruby_memerror(void)
5099{
5100 if (ruby_thread_has_gvl_p()) {
5101 rb_memerror();
5102 }
5103 else {
5104 if (ruby_native_thread_p()) {
5105 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5106 }
5107 else {
5108 /* no ruby thread */
5109 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5110 }
5111 }
5112
5113    /* There has been discussion about whether we should die here; */
5114    /* we may revisit that decision later. */
5115 exit(EXIT_FAILURE);
5116}
5117
5118void
5119rb_memerror(void)
5120{
5121 /* the `GET_VM()->special_exceptions` below assumes that
5122 * the VM is reachable from the current thread. We should
5123 * definitely make sure of that. */
5124 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5125
5126 rb_execution_context_t *ec = GET_EC();
5127 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5128
5129 if (!exc ||
5130 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5131 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5132 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5133 exit(EXIT_FAILURE);
5134 }
5135 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5136 rb_ec_raised_clear(ec);
5137 }
5138 else {
5139 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5140 exc = ruby_vm_special_exception_copy(exc);
5141 }
5142 ec->errinfo = exc;
5143 EC_JUMP_TAG(ec, TAG_RAISE);
5144}
5145
5146bool
5147rb_memerror_reentered(void)
5148{
5149 rb_execution_context_t *ec = GET_EC();
5150 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5151}
5152
5153static void *
5154handle_malloc_failure(void *ptr)
5155{
5156 if (LIKELY(ptr)) {
5157 return ptr;
5158 }
5159 else {
5160 ruby_memerror();
5161 UNREACHABLE_RETURN(ptr);
5162 }
5163}
5164
5165static void *ruby_xmalloc_body(size_t size);
5166
5167void *
5168ruby_xmalloc(size_t size)
5169{
5170 return handle_malloc_failure(ruby_xmalloc_body(size));
5171}
5172
5173static bool
5174malloc_gc_allowed(void)
5175{
5176 rb_ractor_t *r = rb_current_ractor_raw(false);
5177
5178 return r == NULL || !r->malloc_gc_disabled;
5179}
5180
5181static void *
5182ruby_xmalloc_body(size_t size)
5183{
5184 if ((ssize_t)size < 0) {
5185 negative_size_allocation_error("too large allocation size");
5186 }
5187
5188 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5189}
5190
5191void
5192ruby_malloc_size_overflow(size_t count, size_t elsize)
5193{
5194 rb_raise(rb_eArgError,
5195 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5196 count, elsize);
5197}
5198
5199void
5200ruby_malloc_add_size_overflow(size_t x, size_t y)
5201{
5202 rb_raise(rb_eArgError,
5203 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5204 x, y);
5205}
5206
5207static void *ruby_xmalloc2_body(size_t n, size_t size);
5208
5209void *
5210ruby_xmalloc2(size_t n, size_t size)
5211{
5212 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5213}
5214
5215static void *
5216ruby_xmalloc2_body(size_t n, size_t size)
5217{
5218 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5219}
5220
5221static void *ruby_xcalloc_body(size_t n, size_t size);
5222
5223void *
5224ruby_xcalloc(size_t n, size_t size)
5225{
5226 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5227}
5228
5229static void *
5230ruby_xcalloc_body(size_t n, size_t size)
5231{
5232 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5233}
5234
5235static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5236
5237#ifdef ruby_sized_xrealloc
5238#undef ruby_sized_xrealloc
5239#endif
5240void *
5241ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5242{
5243 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5244}
5245
5246static void *
5247ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5248{
5249 if ((ssize_t)new_size < 0) {
5250 negative_size_allocation_error("too large allocation size");
5251 }
5252
5253 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5254}
5255
5256void *
5257ruby_xrealloc(void *ptr, size_t new_size)
5258{
5259 return ruby_sized_xrealloc(ptr, new_size, 0);
5260}
5261
5262static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5263
5264#ifdef ruby_sized_xrealloc2
5265#undef ruby_sized_xrealloc2
5266#endif
5267void *
5268ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5269{
5270 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5271}
5272
5273static void *
5274ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5275{
5276 size_t len = xmalloc2_size(n, size);
5277 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5278}
5279
5280void *
5281ruby_xrealloc2(void *ptr, size_t n, size_t size)
5282{
5283 return ruby_sized_xrealloc2(ptr, n, size, 0);
5284}
5285
5286#ifdef ruby_sized_xfree
5287#undef ruby_sized_xfree
5288#endif
5289void
5290ruby_sized_xfree(void *x, size_t size)
5291{
5292 if (LIKELY(x)) {
5293 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5294 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5295 * that case. */
5296 if (LIKELY(GET_VM())) {
5297 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5298 }
5299 else {
5300 ruby_mimfree(x);
5301 }
5302 }
5303}
5304
5305void
5306ruby_xfree(void *x)
5307{
5308 ruby_sized_xfree(x, 0);
5309}
5310
5311void *
5312rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5313{
5314 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5315 return ruby_xmalloc(w);
5316}
5317
5318void *
5319rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5320{
5321 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5322 return ruby_xcalloc(w, 1);
5323}
5324
5325void *
5326rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5327{
5328 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5329 return ruby_xrealloc((void *)p, w);
5330}
5331
5332void *
5333rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5334{
5335 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5336 return ruby_xmalloc(u);
5337}
5338
5339void *
5340rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5341{
5342 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5343 return ruby_xcalloc(u, 1);
5344}
5345
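/* Editorial sketch (not part of gc.c): the mul_add helpers above allocate a
 * header plus a flexible array in one overflow-checked step. The struct and
 * function are hypothetical. */
#if 0
struct my_buffer {
    size_t len;
    VALUE items[];  /* flexible array member */
};

static struct my_buffer *
my_buffer_new(size_t n)
{
    /* n * sizeof(VALUE) + sizeof(header), raising on integer overflow */
    struct my_buffer *buf = rb_xmalloc_mul_add(n, sizeof(VALUE), sizeof(struct my_buffer));
    buf->len = n;
    return buf;
}
#endif
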
5346/* Mimics ruby_xmalloc, but does not need rb_objspace.
5347 * Returns a pointer suitable for passing to ruby_xfree.
5348 */
5349void *
5350ruby_mimmalloc(size_t size)
5351{
5352 void *mem;
5353#if CALC_EXACT_MALLOC_SIZE
5354 size += sizeof(struct malloc_obj_info);
5355#endif
5356 mem = malloc(size);
5357#if CALC_EXACT_MALLOC_SIZE
5358 if (!mem) {
5359 return NULL;
5360 }
5361 else
5362 /* set 0 for consistency of allocated_size/allocations */
5363 {
5364 struct malloc_obj_info *info = mem;
5365 info->size = 0;
5366 mem = info + 1;
5367 }
5368#endif
5369 return mem;
5370}
5371
5372void *
5373ruby_mimcalloc(size_t num, size_t size)
5374{
5375 void *mem;
5376#if CALC_EXACT_MALLOC_SIZE
5377 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5378 if (UNLIKELY(t.left)) {
5379 return NULL;
5380 }
5381 size = t.right + sizeof(struct malloc_obj_info);
5382 mem = calloc1(size);
5383 if (!mem) {
5384 return NULL;
5385 }
5386 else
5387 /* set 0 for consistency of allocated_size/allocations */
5388 {
5389 struct malloc_obj_info *info = mem;
5390 info->size = 0;
5391 mem = info + 1;
5392 }
5393#else
5394 mem = calloc(num, size);
5395#endif
5396 return mem;
5397}
5398
5399void
5400ruby_mimfree(void *ptr)
5401{
5402#if CALC_EXACT_MALLOC_SIZE
5403 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5404 ptr = info;
5405#endif
5406 free(ptr);
5407}
5408
5409void
5410rb_gc_adjust_memory_usage(ssize_t diff)
5411{
5412 unless_objspace(objspace) { return; }
5413
5414 rb_gc_impl_adjust_memory_usage(objspace, diff);
5415}
5416
5417const char *
5418rb_obj_info(VALUE obj)
5419{
5420 return obj_info(obj);
5421}
5422
5423void
5424rb_obj_info_dump(VALUE obj)
5425{
5426 char buff[0x100];
5427 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5428}
5429
5430void
5431rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5432{
5433 char buff[0x100];
5434 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5435}
5436
5437void
5438rb_gc_before_fork(void)
5439{
5440 rb_gc_impl_before_fork(rb_gc_get_objspace());
5441}
5442
5443void
5444rb_gc_after_fork(rb_pid_t pid)
5445{
5446 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5447}
5448
5449bool
5450rb_gc_obj_shareable_p(VALUE obj)
5451{
5452 return RB_OBJ_SHAREABLE_P(obj);
5453}
5454
5455void
5456rb_gc_rp(VALUE obj)
5457{
5458 rp(obj);
5459}
5460
5461struct check_shareable_data {
5462    VALUE parent;
5463 long err_count;
5464};
5465
5466static void
5467check_shareable_i(const VALUE child, void *ptr)
5468{
5469 struct check_shareable_data *data = (struct check_shareable_data *)ptr;
5470
5471 if (!rb_gc_obj_shareable_p(child)) {
5472 fprintf(stderr, "(a) ");
5473 rb_gc_rp(data->parent);
5474 fprintf(stderr, "(b) ");
5475 rb_gc_rp(child);
5476 fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
5477
5478 data->err_count++;
5479 rb_bug("!! violate shareable constraint !!");
5480 }
5481}
5482
5483static bool gc_checking_shareable = false;
5484
5485static void
5486gc_verify_shareable(void *objspace, VALUE obj, void *data)
5487{
5488    // While gc_checking_shareable is true, other Ractors must not run
5489    // the GC, because the flag is global rather than Ractor-local.
5490    // TODO: remove the VM locking once the flag is made Ractor-local
5491
5492 unsigned int lev = RB_GC_VM_LOCK();
5493 {
5494 gc_checking_shareable = true;
5495 rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
5496 gc_checking_shareable = false;
5497 }
5498 RB_GC_VM_UNLOCK(lev);
5499}
5500
5501// TODO: this only checks one level of reachability (non-recursive)
5502void
5503rb_gc_verify_shareable(VALUE obj)
5504{
5505 rb_objspace_t *objspace = rb_gc_get_objspace();
5506 struct check_shareable_data data = {
5507 .parent = obj,
5508 .err_count = 0,
5509 };
5510 gc_verify_shareable(objspace, obj, &data);
5511
5512 if (data.err_count > 0) {
5513 rb_bug("rb_gc_verify_shareable");
5514 }
5515}
5516
5517bool
5518rb_gc_checking_shareable(void)
5519{
5520 return gc_checking_shareable;
5521}
5522
5523/*
5524 * Document-module: ObjectSpace
5525 *
5526 * The ObjectSpace module contains a number of routines
5527 * that interact with the garbage collection facility and allow you to
5528 * traverse all living objects with an iterator.
5529 *
5530 * ObjectSpace also provides support for object finalizers, procs that will be
5531 * called once a specific object has been destroyed by garbage collection. See
5532 * the documentation for +ObjectSpace.define_finalizer+ for important
5533 * information on how to use this method correctly.
5534 *
5535 * a = "A"
5536 * b = "B"
5537 *
5538 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5539 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5540 *
5541 * a = nil
5542 * b = nil
5543 *
5544 * _produces:_
5545 *
5546 * Finalizer two on 537763470
5547 * Finalizer one on 537763480
5548 */
5549
5550/* Document-class: GC::Profiler
5551 *
5552 * The GC profiler provides access to information on GC runs including time,
5553 * length and object space size.
5554 *
5555 * Example:
5556 *
5557 * GC::Profiler.enable
5558 *
5559 * require 'rdoc/rdoc'
5560 *
5561 * GC::Profiler.report
5562 *
5563 * GC::Profiler.disable
5564 *
5565 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5566 */
5567
5568#include "gc.rbinc"
5569
5570void
5571Init_GC(void)
5572{
5573#undef rb_intern
5574 rb_gc_register_address(&id2ref_value);
5575
5576 malloc_offset = gc_compute_malloc_offset();
5577
5578 rb_mGC = rb_define_module("GC");
5579
5580 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5581
5582 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5583
5584 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5585 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5586
5587 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5588
5589 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5590
5591 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5592 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5593
5594 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5595
5596 rb_gc_impl_init();
5597}
5598
5599// Set a name for the anonymous virtual memory area. `addr` is the starting
5600// address of the area and `size` is its length in bytes. `name` is a
5601// NUL-terminated human-readable string.
5602//
5603// This function is usually called after calling `mmap()`. The human-readable
5604// annotation helps developers identify the call site of `mmap()` that created
5605// the memory mapping.
5606//
5607// This function currently only works on Linux 5.17 or higher. After calling
5608// this function, we can see annotations in the form of "[anon:...]" in
5609// `/proc/self/maps`, where `...` is the content of `name`. This function has
5610// no effect when called on other platforms.
5611void
5612ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5613{
5614#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5615 // The name length cannot exceed 80 (including the '\0').
5616 RUBY_ASSERT(strlen(name) < 80);
5617 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5618 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5619 // reasons.
5620 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5621 // 2. addr is an invalid address.
5622 // 3. The string pointed by name is too long.
5623 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5624 // happen if we run the compiled binary on an old kernel. In theory, all
5625 // other errors should result in a failure. But since EINVAL cannot tell
5626 // the first error from others, and this function is mainly used for
5627 // debugging, we silently ignore the error.
5628 errno = 0;
5629#endif
5630}
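
/* Editorial sketch (not part of gc.c): annotating a fresh anonymous mapping
 * so it appears as "[anon:my-region]" in /proc/self/maps on Linux 5.17+. */
#if 0
#include <sys/mman.h>

static void *
example_mmap_annotated(size_t size)
{
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED) {
        ruby_annotate_mmap(mem, size, "my-region");
    }
    return mem;
}
#endif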